12545bca0SMatthew Dillon /*-
22545bca0SMatthew Dillon * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
32545bca0SMatthew Dillon *
42545bca0SMatthew Dillon * Copyright (c) 2005, WHEEL Sp. z o.o.
52545bca0SMatthew Dillon * Copyright (c) 2005 Justin T. Gibbs.
62545bca0SMatthew Dillon * All rights reserved.
72545bca0SMatthew Dillon *
82545bca0SMatthew Dillon * Redistribution and use in source and binary forms, with or without
92545bca0SMatthew Dillon * modification, are permitted provided that the following conditions are
102545bca0SMatthew Dillon * met:
112545bca0SMatthew Dillon * 1. Redistributions of source code must retain the above copyright
122545bca0SMatthew Dillon * notice, this list of conditions and the following disclaimer.
132545bca0SMatthew Dillon * 2. Redistributions in binary form must reproduce at minimum a disclaimer
142545bca0SMatthew Dillon * substantially similar to the "NO WARRANTY" disclaimer below
152545bca0SMatthew Dillon * ("Disclaimer") and any redistribution must be conditioned upon including
162545bca0SMatthew Dillon * a substantially similar Disclaimer requirement for further binary
172545bca0SMatthew Dillon * redistribution.
182545bca0SMatthew Dillon * 3. Neither the names of the above listed copyright holders nor the names
192545bca0SMatthew Dillon * of any contributors may be used to endorse or promote products derived
202545bca0SMatthew Dillon * from this software without specific prior written permission.
212545bca0SMatthew Dillon *
222545bca0SMatthew Dillon * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
232545bca0SMatthew Dillon * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
242545bca0SMatthew Dillon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
252545bca0SMatthew Dillon * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
262545bca0SMatthew Dillon * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
272545bca0SMatthew Dillon * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
282545bca0SMatthew Dillon * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
292545bca0SMatthew Dillon * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
302545bca0SMatthew Dillon * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
312545bca0SMatthew Dillon * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
322545bca0SMatthew Dillon * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
332545bca0SMatthew Dillon */
342545bca0SMatthew Dillon /*-
352545bca0SMatthew Dillon * Some Breakage and Bug Fixing added later.
362545bca0SMatthew Dillon * Copyright (c) 2006, by Matthew Jacob
372545bca0SMatthew Dillon * All Rights Reserved
382545bca0SMatthew Dillon *
392545bca0SMatthew Dillon * Support from LSI-Logic has also gone a great deal toward making this a
402545bca0SMatthew Dillon * workable subsystem and is gratefully acknowledged.
4132af04f7SSascha Wildner *
42f582582cSSascha Wildner * $FreeBSD: head/sys/dev/mpt/mpt_raid.c 260058 2013-12-29 20:41:32Z marius $
432545bca0SMatthew Dillon */
442545bca0SMatthew Dillon
452545bca0SMatthew Dillon #include <dev/disk/mpt/mpt.h>
462545bca0SMatthew Dillon #include <dev/disk/mpt/mpt_raid.h>
472545bca0SMatthew Dillon
482545bca0SMatthew Dillon #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
492545bca0SMatthew Dillon #include "dev/disk/mpt/mpilib/mpi_raid.h"
502545bca0SMatthew Dillon
512545bca0SMatthew Dillon #include <bus/cam/cam.h>
522545bca0SMatthew Dillon #include <bus/cam/cam_ccb.h>
53f582582cSSascha Wildner #include <bus/cam/cam_periph.h>
542545bca0SMatthew Dillon #include <bus/cam/cam_sim.h>
55*cec957e9SMatthew Dillon #include <bus/cam/cam_xpt.h>
562545bca0SMatthew Dillon #include <bus/cam/cam_xpt_sim.h>
57*cec957e9SMatthew Dillon #include <bus/cam/cam_xpt_periph.h>
582545bca0SMatthew Dillon
592545bca0SMatthew Dillon #include <sys/callout.h>
602545bca0SMatthew Dillon #include <sys/kthread.h>
612545bca0SMatthew Dillon #include <sys/sysctl.h>
622545bca0SMatthew Dillon
632545bca0SMatthew Dillon #include <machine/stdarg.h>
642545bca0SMatthew Dillon
/*
 * Result area appended to a RAID action request buffer.  The reply-frame
 * handler copies the reply's ActionData and ActionStatus here so that a
 * requester sleeping on the request can inspect them after wakeup.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * The action result lives immediately after the RAID action request
 * message within the request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* IOC status code recorded in a request, with the flag bits masked off. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
792545bca0SMatthew Dillon
/*
 * Personality entry points implemented by the integrated-RAID subsystem.
 * These are wired into the core mpt(4) driver via mpt_raid_personality.
 */
static mpt_probe_handler_t mpt_raid_probe;
static mpt_attach_handler_t mpt_raid_attach;
static mpt_enable_handler_t mpt_raid_enable;
static mpt_event_handler_t mpt_raid_event;
static mpt_shutdown_handler_t mpt_raid_shutdown;
static mpt_reset_handler_t mpt_raid_ioc_reset;
static mpt_detach_handler_t mpt_raid_detach;

static struct mpt_personality mpt_raid_personality =
{
	.name = "mpt_raid",
	.probe = mpt_raid_probe,
	.attach = mpt_raid_attach,
	.enable = mpt_raid_enable,
	.event = mpt_raid_event,
	.reset = mpt_raid_ioc_reset,
	.shutdown = mpt_raid_shutdown,
	.detach = mpt_raid_detach,
};

/* Register after the core and CAM personalities; depends on mpt_cam. */
DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
1022545bca0SMatthew Dillon
/* Reply handling for RAID action requests issued by this personality. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
/* Lifecycle of the background RAID monitoring kthread. */
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
				   struct cam_path *);
static void mpt_raid_sysctl_attach(struct mpt_softc *);

/* Human-readable names for volume types and volume/disk states. */
static const char *mpt_vol_type(struct mpt_raid_volume *vol);
static const char *mpt_vol_state(struct mpt_raid_volume *vol);
static const char *mpt_disk_state(struct mpt_raid_disk *disk);
/* printf-style diagnostics prefixed with the volume/disk identity. */
static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
			const char *fmt, ...) __printflike(3, 4);
static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
			 const char *fmt, ...) __printflike(3, 4);

static int mpt_issue_raid_req(struct mpt_softc *mpt,
    struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
    u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait);

static int mpt_refresh_raid_data(struct mpt_softc *mpt);
static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);

/* Handler id assigned by the core when the reply handler is registered. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
1362545bca0SMatthew Dillon
1374c42baf4SSascha Wildner static const char *
mpt_vol_type(struct mpt_raid_volume * vol)1382545bca0SMatthew Dillon mpt_vol_type(struct mpt_raid_volume *vol)
1392545bca0SMatthew Dillon {
1402545bca0SMatthew Dillon switch (vol->config_page->VolumeType) {
1412545bca0SMatthew Dillon case MPI_RAID_VOL_TYPE_IS:
1422545bca0SMatthew Dillon return ("RAID-0");
1432545bca0SMatthew Dillon case MPI_RAID_VOL_TYPE_IME:
1442545bca0SMatthew Dillon return ("RAID-1E");
1452545bca0SMatthew Dillon case MPI_RAID_VOL_TYPE_IM:
1462545bca0SMatthew Dillon return ("RAID-1");
1472545bca0SMatthew Dillon default:
1482545bca0SMatthew Dillon return ("Unknown");
1492545bca0SMatthew Dillon }
1502545bca0SMatthew Dillon }
1512545bca0SMatthew Dillon
1524c42baf4SSascha Wildner static const char *
mpt_vol_state(struct mpt_raid_volume * vol)1532545bca0SMatthew Dillon mpt_vol_state(struct mpt_raid_volume *vol)
1542545bca0SMatthew Dillon {
1552545bca0SMatthew Dillon switch (vol->config_page->VolumeStatus.State) {
1562545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
1572545bca0SMatthew Dillon return ("Optimal");
1582545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
1592545bca0SMatthew Dillon return ("Degraded");
1602545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1612545bca0SMatthew Dillon return ("Failed");
1622545bca0SMatthew Dillon default:
1632545bca0SMatthew Dillon return ("Unknown");
1642545bca0SMatthew Dillon }
1652545bca0SMatthew Dillon }
1662545bca0SMatthew Dillon
1674c42baf4SSascha Wildner static const char *
mpt_disk_state(struct mpt_raid_disk * disk)1682545bca0SMatthew Dillon mpt_disk_state(struct mpt_raid_disk *disk)
1692545bca0SMatthew Dillon {
1702545bca0SMatthew Dillon switch (disk->config_page.PhysDiskStatus.State) {
1712545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_ONLINE:
1722545bca0SMatthew Dillon return ("Online");
1732545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_MISSING:
1742545bca0SMatthew Dillon return ("Missing");
1752545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1762545bca0SMatthew Dillon return ("Incompatible");
1772545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_FAILED:
1782545bca0SMatthew Dillon return ("Failed");
1792545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_INITIALIZING:
1802545bca0SMatthew Dillon return ("Initializing");
1812545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1822545bca0SMatthew Dillon return ("Offline Requested");
1832545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1842545bca0SMatthew Dillon return ("Failed per Host Request");
1852545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1862545bca0SMatthew Dillon return ("Offline");
1872545bca0SMatthew Dillon default:
1882545bca0SMatthew Dillon return ("Unknown");
1892545bca0SMatthew Dillon }
1902545bca0SMatthew Dillon }
1912545bca0SMatthew Dillon
/*
 * printf-style diagnostic prefixed with the unit, volume index and the
 * volume's bus/ID, e.g. "mpt0:vol0(mpt0:0:1): ".
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	__va_list ap;

	/* Volume index is derived from the position in the softc's array. */
	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
2052545bca0SMatthew Dillon
/*
 * printf-style diagnostic prefixed with the disk's identity.  A disk that
 * belongs to a volume is named by volume ID and member number; a bare disk
 * is named by its physical bus and target ID.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	__va_list ap;

	if (disk->volume != NULL) {
		kprintf("(%s:vol%d:%d): ",
			device_get_nameunit(mpt->dev),
			disk->volume->config_page->VolumeID,
			disk->member_number);
	} else {
		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
			disk->config_page.PhysDiskBus,
			disk->config_page.PhysDiskID);
	}
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
2262545bca0SMatthew Dillon
2272545bca0SMatthew Dillon static void
mpt_raid_async(void * callback_arg,u_int32_t code,struct cam_path * path,void * arg)2282545bca0SMatthew Dillon mpt_raid_async(void *callback_arg, u_int32_t code,
2292545bca0SMatthew Dillon struct cam_path *path, void *arg)
2302545bca0SMatthew Dillon {
2312545bca0SMatthew Dillon struct mpt_softc *mpt;
2322545bca0SMatthew Dillon
2332545bca0SMatthew Dillon mpt = (struct mpt_softc*)callback_arg;
2342545bca0SMatthew Dillon switch (code) {
2352545bca0SMatthew Dillon case AC_FOUND_DEVICE:
2362545bca0SMatthew Dillon {
2372545bca0SMatthew Dillon struct ccb_getdev *cgd;
2382545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
2392545bca0SMatthew Dillon
2402545bca0SMatthew Dillon cgd = (struct ccb_getdev *)arg;
2412545bca0SMatthew Dillon if (cgd == NULL) {
2422545bca0SMatthew Dillon break;
2432545bca0SMatthew Dillon }
2442545bca0SMatthew Dillon
2452545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
2462545bca0SMatthew Dillon cgd->ccb_h.target_id);
2472545bca0SMatthew Dillon
2482545bca0SMatthew Dillon RAID_VOL_FOREACH(mpt, mpt_vol) {
2492545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
2502545bca0SMatthew Dillon continue;
2512545bca0SMatthew Dillon
2522545bca0SMatthew Dillon if (mpt_vol->config_page->VolumeID
2532545bca0SMatthew Dillon == cgd->ccb_h.target_id) {
2542545bca0SMatthew Dillon mpt_adjust_queue_depth(mpt, mpt_vol, path);
2552545bca0SMatthew Dillon break;
2562545bca0SMatthew Dillon }
2572545bca0SMatthew Dillon }
2582545bca0SMatthew Dillon }
2592545bca0SMatthew Dillon default:
2602545bca0SMatthew Dillon break;
2612545bca0SMatthew Dillon }
2622545bca0SMatthew Dillon }
2632545bca0SMatthew Dillon
2644c42baf4SSascha Wildner static int
mpt_raid_probe(struct mpt_softc * mpt)2652545bca0SMatthew Dillon mpt_raid_probe(struct mpt_softc *mpt)
2662545bca0SMatthew Dillon {
2674c42baf4SSascha Wildner
2682545bca0SMatthew Dillon if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
2692545bca0SMatthew Dillon return (ENODEV);
2702545bca0SMatthew Dillon }
2712545bca0SMatthew Dillon return (0);
2722545bca0SMatthew Dillon }
2732545bca0SMatthew Dillon
2744c42baf4SSascha Wildner static int
mpt_raid_attach(struct mpt_softc * mpt)2752545bca0SMatthew Dillon mpt_raid_attach(struct mpt_softc *mpt)
2762545bca0SMatthew Dillon {
277*cec957e9SMatthew Dillon struct ccb_setasync *csa;
2782545bca0SMatthew Dillon mpt_handler_t handler;
2792545bca0SMatthew Dillon int error;
2802545bca0SMatthew Dillon
2816d259fc1SSascha Wildner mpt_callout_init(mpt, &mpt->raid_timer);
2822545bca0SMatthew Dillon
2832545bca0SMatthew Dillon error = mpt_spawn_raid_thread(mpt);
2842545bca0SMatthew Dillon if (error != 0) {
2852545bca0SMatthew Dillon mpt_prt(mpt, "Unable to spawn RAID thread!\n");
2862545bca0SMatthew Dillon goto cleanup;
2872545bca0SMatthew Dillon }
288*cec957e9SMatthew Dillon csa = &xpt_alloc_ccb()->csa;
2892545bca0SMatthew Dillon
2902545bca0SMatthew Dillon MPT_LOCK(mpt);
2912545bca0SMatthew Dillon handler.reply_handler = mpt_raid_reply_handler;
2922545bca0SMatthew Dillon error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
2932545bca0SMatthew Dillon &raid_handler_id);
2942545bca0SMatthew Dillon if (error != 0) {
2952545bca0SMatthew Dillon mpt_prt(mpt, "Unable to register RAID haandler!\n");
2962545bca0SMatthew Dillon goto cleanup;
2972545bca0SMatthew Dillon }
2982545bca0SMatthew Dillon
299*cec957e9SMatthew Dillon xpt_setup_ccb(&csa->ccb_h, mpt->path, 5);
300*cec957e9SMatthew Dillon csa->ccb_h.func_code = XPT_SASYNC_CB;
301*cec957e9SMatthew Dillon csa->event_enable = AC_FOUND_DEVICE;
302*cec957e9SMatthew Dillon csa->callback = mpt_raid_async;
303*cec957e9SMatthew Dillon csa->callback_arg = mpt;
304*cec957e9SMatthew Dillon xpt_action((union ccb *)csa);
305*cec957e9SMatthew Dillon if (csa->ccb_h.status != CAM_REQ_CMP) {
3062545bca0SMatthew Dillon mpt_prt(mpt, "mpt_raid_attach: Unable to register "
3072545bca0SMatthew Dillon "CAM async handler.\n");
3082545bca0SMatthew Dillon }
3092545bca0SMatthew Dillon MPT_UNLOCK(mpt);
3102545bca0SMatthew Dillon
311*cec957e9SMatthew Dillon xpt_free_ccb(&csa->ccb_h);
3122545bca0SMatthew Dillon mpt_raid_sysctl_attach(mpt);
3132545bca0SMatthew Dillon return (0);
3142545bca0SMatthew Dillon cleanup:
3152545bca0SMatthew Dillon MPT_UNLOCK(mpt);
3162545bca0SMatthew Dillon mpt_raid_detach(mpt);
3172545bca0SMatthew Dillon return (error);
3182545bca0SMatthew Dillon }
3192545bca0SMatthew Dillon
/*
 * Personality enable hook; nothing additional is required for RAID, so
 * always report success.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
3262545bca0SMatthew Dillon
/*
 * Personality detach: stop the refresh timer and RAID thread, deregister
 * the reply handler, and cancel our CAM async subscription (by re-issuing
 * XPT_SASYNC_CB with event_enable cleared).  Teardown order matters: the
 * timer is drained before the thread is terminated, and both before the
 * handler goes away.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync *csa;
	mpt_handler_t handler;

	csa = &xpt_alloc_ccb()->csa;
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	xpt_setup_ccb(&csa->ccb_h, mpt->path, /*priority*/5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = 0;		/* zero mask == unsubscribe */
	csa->callback = mpt_raid_async;
	csa->callback_arg = mpt;
	xpt_action((union ccb *)csa);
	MPT_UNLOCK(mpt);

	xpt_free_ccb(&csa->ccb_h);
}
3512545bca0SMatthew Dillon
/*
 * IOC reset hook.  No RAID-specific state needs rebuilding after a
 * controller reset yet.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
3582545bca0SMatthew Dillon
/*
 * Descriptions of RAID event reason codes.  Indexed directly by
 * MPI_EVENT_RAID_RC_* values, so entry order must match the MPI
 * definitions; mpt_raid_event() range-checks before indexing.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
3742545bca0SMatthew Dillon
/*
 * Handle an MPI_EVENT_INTEGRATED_RAID event notification.
 *
 * Resolves the event's volume and physical disk (when identifiable),
 * updates staleness flags so the RAID thread refetches configuration
 * data, optionally prints a description of the event, and wakes the
 * RAID thread.  Returns 0 if the event was not for us, 1 if consumed.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Locate the active volume matching the event's bus/ID, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* 0xFF means "no physical disk associated with this event". */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/* No default case: unknown reason codes fall through to the
	 * "Unhandled RaidEvent" print below. */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefer the most specific identity prefix available. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "%s", "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Let the RAID thread refresh configuration data if needed. */
	mpt_raid_wakeup(mpt);
	return (1);
}
5052545bca0SMatthew Dillon
5062545bca0SMatthew Dillon static void
mpt_raid_shutdown(struct mpt_softc * mpt)5072545bca0SMatthew Dillon mpt_raid_shutdown(struct mpt_softc *mpt)
5082545bca0SMatthew Dillon {
5092545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
5102545bca0SMatthew Dillon
5112545bca0SMatthew Dillon if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
5122545bca0SMatthew Dillon return;
5132545bca0SMatthew Dillon }
5142545bca0SMatthew Dillon
5152545bca0SMatthew Dillon mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
5162545bca0SMatthew Dillon RAID_VOL_FOREACH(mpt, mpt_vol) {
5172545bca0SMatthew Dillon mpt_verify_mwce(mpt, mpt_vol);
5182545bca0SMatthew Dillon }
5192545bca0SMatthew Dillon }
5202545bca0SMatthew Dillon
/*
 * Reply handler for RAID action requests.  Parses the reply frame (if
 * any), marks the request done, removes it from the pending list, then
 * either wakes a waiter or frees the request.  A request with a waiter
 * is never freed here; the waiter owns it after wakeup.  Always returns
 * TRUE (reply consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	/* State must be updated before any waiter can observe the request. */
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
5512545bca0SMatthew Dillon
5522545bca0SMatthew Dillon /*
5532545bca0SMatthew Dillon * Parse additional completion information in the reply
5542545bca0SMatthew Dillon * frame for RAID I/O requests.
5552545bca0SMatthew Dillon */
5562545bca0SMatthew Dillon static int
mpt_raid_reply_frame_handler(struct mpt_softc * mpt,request_t * req,MSG_DEFAULT_REPLY * reply_frame)5572545bca0SMatthew Dillon mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
5582545bca0SMatthew Dillon MSG_DEFAULT_REPLY *reply_frame)
5592545bca0SMatthew Dillon {
5602545bca0SMatthew Dillon MSG_RAID_ACTION_REPLY *reply;
5612545bca0SMatthew Dillon struct mpt_raid_action_result *action_result;
5622545bca0SMatthew Dillon MSG_RAID_ACTION_REQUEST *rap;
5632545bca0SMatthew Dillon
5642545bca0SMatthew Dillon reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
5652545bca0SMatthew Dillon req->IOCStatus = le16toh(reply->IOCStatus);
5662545bca0SMatthew Dillon rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
5672545bca0SMatthew Dillon
5682545bca0SMatthew Dillon switch (rap->Action) {
5692545bca0SMatthew Dillon case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
5702545bca0SMatthew Dillon mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
5712545bca0SMatthew Dillon break;
5722545bca0SMatthew Dillon case MPI_RAID_ACTION_ENABLE_PHYS_IO:
5732545bca0SMatthew Dillon mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
5742545bca0SMatthew Dillon break;
5752545bca0SMatthew Dillon default:
5762545bca0SMatthew Dillon break;
5772545bca0SMatthew Dillon }
5782545bca0SMatthew Dillon action_result = REQ_TO_RAID_ACTION_RESULT(req);
5792545bca0SMatthew Dillon memcpy(&action_result->action_data, &reply->ActionData,
5802545bca0SMatthew Dillon sizeof(action_result->action_data));
5812545bca0SMatthew Dillon action_result->action_status = le16toh(reply->ActionStatus);
5822545bca0SMatthew Dillon return (TRUE);
5832545bca0SMatthew Dillon }
5842545bca0SMatthew Dillon
5852545bca0SMatthew Dillon /*
5862545bca0SMatthew Dillon * Utiltity routine to perform a RAID action command;
5872545bca0SMatthew Dillon */
static int
mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    struct mpt_raid_disk *disk, request_t *req, u_int Action,
    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait)
{
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;

	/* Build the RAID action message in the request's virtual buffer. */
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = Action;
	rap->ActionDataWord = htole32(ActionDataWord);
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = vol->config_page->VolumeID;
	rap->VolumeBus = vol->config_page->VolumeBus;
	if (disk != NULL)
		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
	else
		rap->PhysDiskNum = 0xFF;	/* no specific physical disk */
	/*
	 * Describe the (optional) data buffer with a single simple 32-bit
	 * SGE.  Transfer direction is selected by 'write'; the flags/length
	 * word is byte-swapped to little-endian after being composed.
	 */
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	/* Tag the message so replies route to the RAID reply handler. */
	rap->MsgContext = htole32(req->index | raid_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	if (wait) {
		/* Poll (no sleep) for completion with a 2 second timeout. */
		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
		    /*sleep_ok*/FALSE, /*time_ms*/2000));
	} else {
		return (0);
	}
}
6282545bca0SMatthew Dillon
6292545bca0SMatthew Dillon /*************************** RAID Status Monitoring ***************************/
/*
 * Spawn the RAID status monitoring thread.  Returns 0 on success or the
 * kthread_create() error code on failure.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, "mpt_raid%d", mpt->unit);
	if (error != 0) {
		/* Thread never started; undo the simq freeze ourselves. */
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
6542545bca0SMatthew Dillon
/*
 * Ask the RAID monitoring thread to exit and wait until it has done so.
 * The exit flag must be set before the wakeup so the thread observes it
 * when it re-checks its loop condition.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	/* Nothing to do if the thread was never spawned or already exited. */
	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* Kick the thread off its idle sleep on &mpt->raid_volumes. */
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
}
6702545bca0SMatthew Dillon
/*
 * Completion callback for the asynchronous XPT_SCAN_BUS issued by
 * mpt_raid_thread(): free the path and the ccb allocated for the scan.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	kfree(ccb, M_TEMP);
}
6772545bca0SMatthew Dillon
/*
 * RAID status monitoring thread.  Runs until mpt->shutdwn_raid is set:
 * sleeps waiting for work (raid_wakeup), refreshes the RAID configuration
 * pages, releases the physdisk simq after the first successful refresh,
 * and kicks off an asynchronous bus rescan when requested.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		/* Block until someone posts work via raid_wakeup. */
		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		/* Non-zero means the refresh failed; retry via timer. */
		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock across the blocking allocation. */
			MPT_UNLOCK(mpt);

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				kfree(ccb, M_TEMP);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/*
				 * Fire off an async wildcard scan; the
				 * callback frees the path and the ccb.
				 */
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);

				/* scan is now in progress */
			}
		}
	}
	/* Publish our exit for mpt_terminate_raid_thread()'s interlock. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kthread_exit();
}
7432545bca0SMatthew Dillon
#if 0
/*
 * Disabled (never-completed) physdisk quiesce support.  Kept for
 * reference; none of this is compiled in.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
/*
 * Initiate a quiesce of physical I/O to a member disk.  Returns
 * CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG once the quiesce
 * action has been issued, CAM_REQUEUE_REQ while one is in flight,
 * or CAM_REQ_CMP_ERR on failure to issue.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		/* Hold further I/O to this device while quiescing. */
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
8032545bca0SMatthew Dillon
8042545bca0SMatthew Dillon /* XXX Ignores that there may be multiple busses/IOCs involved. */
8052545bca0SMatthew Dillon cam_status
mpt_map_physdisk(struct mpt_softc * mpt,union ccb * ccb,target_id_t * tgt)8064c42baf4SSascha Wildner mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
8072545bca0SMatthew Dillon {
8082545bca0SMatthew Dillon struct mpt_raid_disk *mpt_disk;
8092545bca0SMatthew Dillon
8102545bca0SMatthew Dillon mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
8112545bca0SMatthew Dillon if (ccb->ccb_h.target_id < mpt->raid_max_disks
8122545bca0SMatthew Dillon && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
8132545bca0SMatthew Dillon *tgt = mpt_disk->config_page.PhysDiskID;
8142545bca0SMatthew Dillon return (0);
8152545bca0SMatthew Dillon }
8162545bca0SMatthew Dillon mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
8172545bca0SMatthew Dillon ccb->ccb_h.target_id);
8182545bca0SMatthew Dillon return (-1);
8192545bca0SMatthew Dillon }
8202545bca0SMatthew Dillon
8212545bca0SMatthew Dillon /* XXX Ignores that there may be multiple busses/IOCs involved. */
8222545bca0SMatthew Dillon int
mpt_is_raid_member(struct mpt_softc * mpt,target_id_t tgt)8234c42baf4SSascha Wildner mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
8244c42baf4SSascha Wildner {
8254c42baf4SSascha Wildner struct mpt_raid_disk *mpt_disk;
8264c42baf4SSascha Wildner int i;
8274c42baf4SSascha Wildner
8284c42baf4SSascha Wildner if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
8294c42baf4SSascha Wildner return (0);
8304c42baf4SSascha Wildner for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
8314c42baf4SSascha Wildner mpt_disk = &mpt->raid_disks[i];
8324c42baf4SSascha Wildner if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
8334c42baf4SSascha Wildner mpt_disk->config_page.PhysDiskID == tgt)
8344c42baf4SSascha Wildner return (1);
8354c42baf4SSascha Wildner }
8364c42baf4SSascha Wildner return (0);
8374c42baf4SSascha Wildner
8384c42baf4SSascha Wildner }
8394c42baf4SSascha Wildner
8404c42baf4SSascha Wildner /* XXX Ignores that there may be multiple busses/IOCs involved. */
8414c42baf4SSascha Wildner int
mpt_is_raid_volume(struct mpt_softc * mpt,target_id_t tgt)8424c42baf4SSascha Wildner mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
8432545bca0SMatthew Dillon {
8442545bca0SMatthew Dillon CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
8452545bca0SMatthew Dillon CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
8462545bca0SMatthew Dillon
8472545bca0SMatthew Dillon if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
8482545bca0SMatthew Dillon return (0);
8492545bca0SMatthew Dillon }
8502545bca0SMatthew Dillon ioc_vol = mpt->ioc_page2->RaidVolume;
8512545bca0SMatthew Dillon ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
8522545bca0SMatthew Dillon for (;ioc_vol != ioc_last_vol; ioc_vol++) {
8532545bca0SMatthew Dillon if (ioc_vol->VolumeID == tgt) {
8542545bca0SMatthew Dillon return (1);
8552545bca0SMatthew Dillon }
8562545bca0SMatthew Dillon }
8572545bca0SMatthew Dillon return (0);
8582545bca0SMatthew Dillon }
8592545bca0SMatthew Dillon
#if 0
/*
 * Disabled helper: enable or disable a RAID volume via a RAID action.
 * Not compiled in.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): 'req' is not freed on this path — possibly
		 * deliberate since the request may still be owned by the
		 * IOC after a timeout; confirm before "fixing".
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
9122545bca0SMatthew Dillon
/*
 * Reconcile the volume's member write-cache-enable (WCE) setting with the
 * driver's configured policy (mpt->raid_mwce_setting), issuing a
 * CHANGE_VOLUME_SETTINGS RAID action when they differ.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		/* WCE should track the resync state under this policy. */
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the firmware setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle WCE, snapshot the settings word to send to the IOC,
	 * then toggle back; the cached page is only updated after the
	 * action succeeds (see below).
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): 'req' is not freed on this path — possibly
		 * deliberate since the request may still be pending in the
		 * IOC after a timeout; confirm before changing.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Action succeeded: commit the toggle to the cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
9952545bca0SMatthew Dillon
/*
 * Reconcile the volume's resync rate and resync-priority settings with
 * the driver's configured rate (mpt->raid_resync_rate).  Rates >= 128
 * correspond to high-priority resync.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave the firmware setting alone. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): 'req' is not freed on this path —
			 * possibly deliberate since the request may still be
			 * pending in the IOC; confirm before changing.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			/* Success: commit the new rate to the cached page. */
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit, snapshot the settings word to
		 * send, then toggle back; the cached page is only updated
		 * after the action succeeds (see below).
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): 'req' not freed here either. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the toggle to the cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
10862545bca0SMatthew Dillon
/*
 * Set the CAM queue depth (openings) for the device at 'path' to the
 * driver's configured RAID queue depth via an XPT_REL_SIMQ ccb.
 */
static void
mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		       struct cam_path *path)
{
	struct ccb_relsim *crs;

	crs = &xpt_alloc_ccb()->crs;
	xpt_setup_ccb(&crs->ccb_h, path, /*priority*/5);
	crs->ccb_h.func_code = XPT_REL_SIMQ;
	/* CAM_DEV_QFREEZE: keep the queue frozen while adjusting. */
	crs->ccb_h.flags = CAM_DEV_QFREEZE;
	crs->release_flags = RELSIM_ADJUST_OPENINGS;
	crs->openings = mpt->raid_queue_depth;
	xpt_action((union ccb *)crs);
	if (crs->ccb_h.status != CAM_REQ_CMP) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
			    "with CAM status %#x\n", crs->ccb_h.status);
	}
	xpt_free_ccb(&crs->ccb_h);
}
11062545bca0SMatthew Dillon
/*
 * Print a human-readable summary of a RAID volume to the console:
 * volume settings, hot-spare pool membership, and the state of each
 * member physical disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones we recognize. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Pluralize "Pool" when more than one bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		/* IM (mirror) members are Primary/Secondary; else stripes. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
				 "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
12142545bca0SMatthew Dillon
12152545bca0SMatthew Dillon static void
mpt_announce_disk(struct mpt_softc * mpt,struct mpt_raid_disk * mpt_disk)12162545bca0SMatthew Dillon mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
12172545bca0SMatthew Dillon {
12182545bca0SMatthew Dillon CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
12192545bca0SMatthew Dillon int rd_bus = cam_sim_bus(mpt->sim);
12202545bca0SMatthew Dillon int pt_bus = cam_sim_bus(mpt->phydisk_sim);
12212545bca0SMatthew Dillon u_int i;
12222545bca0SMatthew Dillon
12232545bca0SMatthew Dillon disk_pg = &mpt_disk->config_page;
12242545bca0SMatthew Dillon mpt_disk_prt(mpt, mpt_disk,
12256d259fc1SSascha Wildner "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
12262545bca0SMatthew Dillon device_get_nameunit(mpt->dev), rd_bus,
12272545bca0SMatthew Dillon disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
122857aef926SSascha Wildner pt_bus, (int)(mpt_disk - mpt->raid_disks));
12292545bca0SMatthew Dillon if (disk_pg->PhysDiskSettings.HotSparePool == 0)
12302545bca0SMatthew Dillon return;
12312545bca0SMatthew Dillon mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
12322545bca0SMatthew Dillon powerof2(disk_pg->PhysDiskSettings.HotSparePool)
12332545bca0SMatthew Dillon ? ":" : "s:");
12342545bca0SMatthew Dillon for (i = 0; i < 8; i++) {
12352545bca0SMatthew Dillon u_int mask;
12362545bca0SMatthew Dillon
12372545bca0SMatthew Dillon mask = 0x1 << i;
12382545bca0SMatthew Dillon if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
12392545bca0SMatthew Dillon continue;
12402545bca0SMatthew Dillon mpt_prtc(mpt, " %d", i);
12412545bca0SMatthew Dillon }
12422545bca0SMatthew Dillon mpt_prtc(mpt, "\n");
12432545bca0SMatthew Dillon }
12442545bca0SMatthew Dillon
12452545bca0SMatthew Dillon static void
mpt_refresh_raid_disk(struct mpt_softc * mpt,struct mpt_raid_disk * mpt_disk,IOC_3_PHYS_DISK * ioc_disk)12462545bca0SMatthew Dillon mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
12472545bca0SMatthew Dillon IOC_3_PHYS_DISK *ioc_disk)
12482545bca0SMatthew Dillon {
12492545bca0SMatthew Dillon int rv;
12502545bca0SMatthew Dillon
12512545bca0SMatthew Dillon rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
12522545bca0SMatthew Dillon /*PageNumber*/0, ioc_disk->PhysDiskNum,
12532545bca0SMatthew Dillon &mpt_disk->config_page.Header,
12542545bca0SMatthew Dillon /*sleep_ok*/TRUE, /*timeout_ms*/5000);
12552545bca0SMatthew Dillon if (rv != 0) {
12562545bca0SMatthew Dillon mpt_prt(mpt, "mpt_refresh_raid_disk: "
12572545bca0SMatthew Dillon "Failed to read RAID Disk Hdr(%d)\n",
12582545bca0SMatthew Dillon ioc_disk->PhysDiskNum);
12592545bca0SMatthew Dillon return;
12602545bca0SMatthew Dillon }
12612545bca0SMatthew Dillon rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
12622545bca0SMatthew Dillon &mpt_disk->config_page.Header,
12632545bca0SMatthew Dillon sizeof(mpt_disk->config_page),
12642545bca0SMatthew Dillon /*sleep_ok*/TRUE, /*timeout_ms*/5000);
12652545bca0SMatthew Dillon if (rv != 0)
12662545bca0SMatthew Dillon mpt_prt(mpt, "mpt_refresh_raid_disk: "
12672545bca0SMatthew Dillon "Failed to read RAID Disk Page(%d)\n",
12682545bca0SMatthew Dillon ioc_disk->PhysDiskNum);
12692545bca0SMatthew Dillon mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
12702545bca0SMatthew Dillon }
12712545bca0SMatthew Dillon
/*
 * Refresh the cached RAID Volume Page 0 for mpt_vol from the IOC and,
 * if a resync is in progress, fetch the resync progress indicator via
 * a RAID action request.  ioc_vol identifies the volume (by page
 * number) within IOC Page 2.  On any failure the function prints a
 * diagnostic and returns without further updates.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* The page is about to be re-read; clear UP2DATE until done. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Convert the freshly read page into host byte order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * For IM (mirror) volumes, normalize the firmware map so
		 * the primary member is member_number 0.
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Nothing more to do unless a resync is currently running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	/* Issue a RAID action to fetch the resync progress indicator. */
	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	/* Copy the indicator out only if both driver and IOC report success. */
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
13502545bca0SMatthew Dillon
/*
 * Update in-core information about RAID support.  We update any entries
 * that didn't previously exist or that have been marked as needing to
 * be updated by our event handler.  Interesting changes are displayed
 * to the console.
 */
13574c42baf4SSascha Wildner static int
mpt_refresh_raid_data(struct mpt_softc * mpt)13582545bca0SMatthew Dillon mpt_refresh_raid_data(struct mpt_softc *mpt)
13592545bca0SMatthew Dillon {
13602545bca0SMatthew Dillon CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
13612545bca0SMatthew Dillon CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
13622545bca0SMatthew Dillon IOC_3_PHYS_DISK *ioc_disk;
13632545bca0SMatthew Dillon IOC_3_PHYS_DISK *ioc_last_disk;
13642545bca0SMatthew Dillon CONFIG_PAGE_RAID_VOL_0 *vol_pg;
13652545bca0SMatthew Dillon size_t len;
13662545bca0SMatthew Dillon int rv;
13672545bca0SMatthew Dillon int i;
13682545bca0SMatthew Dillon u_int nonopt_volumes;
13692545bca0SMatthew Dillon
13702545bca0SMatthew Dillon if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
13712545bca0SMatthew Dillon return (0);
13722545bca0SMatthew Dillon }
13732545bca0SMatthew Dillon
13742545bca0SMatthew Dillon /*
13752545bca0SMatthew Dillon * Mark all items as unreferenced by the configuration.
13762545bca0SMatthew Dillon * This allows us to find, report, and discard stale
13772545bca0SMatthew Dillon * entries.
13782545bca0SMatthew Dillon */
13792545bca0SMatthew Dillon for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
13802545bca0SMatthew Dillon mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
13812545bca0SMatthew Dillon }
13822545bca0SMatthew Dillon for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
13832545bca0SMatthew Dillon mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
13842545bca0SMatthew Dillon }
13852545bca0SMatthew Dillon
13862545bca0SMatthew Dillon /*
13872545bca0SMatthew Dillon * Get Physical Disk information.
13882545bca0SMatthew Dillon */
13892545bca0SMatthew Dillon len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
13902545bca0SMatthew Dillon rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
13912545bca0SMatthew Dillon &mpt->ioc_page3->Header, len,
13922545bca0SMatthew Dillon /*sleep_ok*/TRUE, /*timeout_ms*/5000);
13932545bca0SMatthew Dillon if (rv) {
13942545bca0SMatthew Dillon mpt_prt(mpt,
13952545bca0SMatthew Dillon "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
13962545bca0SMatthew Dillon return (-1);
13972545bca0SMatthew Dillon }
13982545bca0SMatthew Dillon mpt2host_config_page_ioc3(mpt->ioc_page3);
13992545bca0SMatthew Dillon
14002545bca0SMatthew Dillon ioc_disk = mpt->ioc_page3->PhysDisk;
14012545bca0SMatthew Dillon ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
14022545bca0SMatthew Dillon for (; ioc_disk != ioc_last_disk; ioc_disk++) {
14032545bca0SMatthew Dillon struct mpt_raid_disk *mpt_disk;
14042545bca0SMatthew Dillon
14052545bca0SMatthew Dillon mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
14062545bca0SMatthew Dillon mpt_disk->flags |= MPT_RDF_REFERENCED;
14072545bca0SMatthew Dillon if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
14082545bca0SMatthew Dillon != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
14092545bca0SMatthew Dillon
14102545bca0SMatthew Dillon mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
14112545bca0SMatthew Dillon
14122545bca0SMatthew Dillon }
14132545bca0SMatthew Dillon mpt_disk->flags |= MPT_RDF_ACTIVE;
14142545bca0SMatthew Dillon mpt->raid_rescan++;
14152545bca0SMatthew Dillon }
14162545bca0SMatthew Dillon
14172545bca0SMatthew Dillon /*
14182545bca0SMatthew Dillon * Refresh volume data.
14192545bca0SMatthew Dillon */
14202545bca0SMatthew Dillon len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
14212545bca0SMatthew Dillon rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
14222545bca0SMatthew Dillon &mpt->ioc_page2->Header, len,
14232545bca0SMatthew Dillon /*sleep_ok*/TRUE, /*timeout_ms*/5000);
14242545bca0SMatthew Dillon if (rv) {
14252545bca0SMatthew Dillon mpt_prt(mpt, "mpt_refresh_raid_data: "
14262545bca0SMatthew Dillon "Failed to read IOC Page 2\n");
14272545bca0SMatthew Dillon return (-1);
14282545bca0SMatthew Dillon }
14292545bca0SMatthew Dillon mpt2host_config_page_ioc2(mpt->ioc_page2);
14302545bca0SMatthew Dillon
14312545bca0SMatthew Dillon ioc_vol = mpt->ioc_page2->RaidVolume;
14322545bca0SMatthew Dillon ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
14332545bca0SMatthew Dillon for (;ioc_vol != ioc_last_vol; ioc_vol++) {
14342545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
14352545bca0SMatthew Dillon
14362545bca0SMatthew Dillon mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
14372545bca0SMatthew Dillon mpt_vol->flags |= MPT_RVF_REFERENCED;
14382545bca0SMatthew Dillon vol_pg = mpt_vol->config_page;
14392545bca0SMatthew Dillon if (vol_pg == NULL)
14402545bca0SMatthew Dillon continue;
14412545bca0SMatthew Dillon if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
14422545bca0SMatthew Dillon != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
14432545bca0SMatthew Dillon || (vol_pg->VolumeStatus.Flags
14442545bca0SMatthew Dillon & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
14452545bca0SMatthew Dillon
14462545bca0SMatthew Dillon mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
14472545bca0SMatthew Dillon }
14482545bca0SMatthew Dillon mpt_vol->flags |= MPT_RVF_ACTIVE;
14492545bca0SMatthew Dillon }
14502545bca0SMatthew Dillon
14512545bca0SMatthew Dillon nonopt_volumes = 0;
14522545bca0SMatthew Dillon for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
14532545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
14542545bca0SMatthew Dillon uint64_t total;
14552545bca0SMatthew Dillon uint64_t left;
14562545bca0SMatthew Dillon int m;
14572545bca0SMatthew Dillon u_int prio;
14582545bca0SMatthew Dillon
14592545bca0SMatthew Dillon mpt_vol = &mpt->raid_volumes[i];
14602545bca0SMatthew Dillon
14612545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
14622545bca0SMatthew Dillon continue;
14632545bca0SMatthew Dillon }
14642545bca0SMatthew Dillon
14652545bca0SMatthew Dillon vol_pg = mpt_vol->config_page;
14662545bca0SMatthew Dillon if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
14672545bca0SMatthew Dillon == MPT_RVF_ANNOUNCED) {
14682545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
14692545bca0SMatthew Dillon mpt_vol->flags = 0;
14702545bca0SMatthew Dillon continue;
14712545bca0SMatthew Dillon }
14722545bca0SMatthew Dillon
14732545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
14742545bca0SMatthew Dillon mpt_announce_vol(mpt, mpt_vol);
14752545bca0SMatthew Dillon mpt_vol->flags |= MPT_RVF_ANNOUNCED;
14762545bca0SMatthew Dillon }
14772545bca0SMatthew Dillon
14782545bca0SMatthew Dillon if (vol_pg->VolumeStatus.State !=
14792545bca0SMatthew Dillon MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
14802545bca0SMatthew Dillon nonopt_volumes++;
14812545bca0SMatthew Dillon
14822545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
14832545bca0SMatthew Dillon continue;
14842545bca0SMatthew Dillon
14852545bca0SMatthew Dillon mpt_vol->flags |= MPT_RVF_UP2DATE;
14862545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
14872545bca0SMatthew Dillon mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
14882545bca0SMatthew Dillon mpt_verify_mwce(mpt, mpt_vol);
14892545bca0SMatthew Dillon
14902545bca0SMatthew Dillon if (vol_pg->VolumeStatus.Flags == 0) {
14912545bca0SMatthew Dillon continue;
14922545bca0SMatthew Dillon }
14932545bca0SMatthew Dillon
14942545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "Status (");
14952545bca0SMatthew Dillon for (m = 1; m <= 0x80; m <<= 1) {
14962545bca0SMatthew Dillon switch (vol_pg->VolumeStatus.Flags & m) {
14972545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
14982545bca0SMatthew Dillon mpt_prtc(mpt, " Enabled");
14992545bca0SMatthew Dillon break;
15002545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
15012545bca0SMatthew Dillon mpt_prtc(mpt, " Quiesced");
15022545bca0SMatthew Dillon break;
15032545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
15042545bca0SMatthew Dillon mpt_prtc(mpt, " Re-Syncing");
15052545bca0SMatthew Dillon break;
15062545bca0SMatthew Dillon case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
15072545bca0SMatthew Dillon mpt_prtc(mpt, " Inactive");
15082545bca0SMatthew Dillon break;
15092545bca0SMatthew Dillon default:
15102545bca0SMatthew Dillon break;
15112545bca0SMatthew Dillon }
15122545bca0SMatthew Dillon }
15132545bca0SMatthew Dillon mpt_prtc(mpt, " )\n");
15142545bca0SMatthew Dillon
15152545bca0SMatthew Dillon if ((vol_pg->VolumeStatus.Flags
15162545bca0SMatthew Dillon & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
15172545bca0SMatthew Dillon continue;
15182545bca0SMatthew Dillon
15192545bca0SMatthew Dillon mpt_verify_resync_rate(mpt, mpt_vol);
15202545bca0SMatthew Dillon
15212545bca0SMatthew Dillon left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
15222545bca0SMatthew Dillon total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
15232545bca0SMatthew Dillon if (vol_pg->ResyncRate != 0) {
15242545bca0SMatthew Dillon
15252545bca0SMatthew Dillon prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
15262545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
15272545bca0SMatthew Dillon prio / 1000, prio % 1000);
15282545bca0SMatthew Dillon } else {
15292545bca0SMatthew Dillon prio = vol_pg->VolumeSettings.Settings
15302545bca0SMatthew Dillon & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
15312545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
15322545bca0SMatthew Dillon prio ? "High" : "Low");
15332545bca0SMatthew Dillon }
15342545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
15352545bca0SMatthew Dillon "blocks remaining\n", (uintmax_t)left,
15362545bca0SMatthew Dillon (uintmax_t)total);
15372545bca0SMatthew Dillon
15382545bca0SMatthew Dillon /* Periodically report on sync progress. */
15392545bca0SMatthew Dillon mpt_schedule_raid_refresh(mpt);
15402545bca0SMatthew Dillon }
15412545bca0SMatthew Dillon
15422545bca0SMatthew Dillon for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
15432545bca0SMatthew Dillon struct mpt_raid_disk *mpt_disk;
15442545bca0SMatthew Dillon CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
15452545bca0SMatthew Dillon int m;
15462545bca0SMatthew Dillon
15472545bca0SMatthew Dillon mpt_disk = &mpt->raid_disks[i];
15482545bca0SMatthew Dillon disk_pg = &mpt_disk->config_page;
15492545bca0SMatthew Dillon
15502545bca0SMatthew Dillon if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
15512545bca0SMatthew Dillon continue;
15522545bca0SMatthew Dillon
15532545bca0SMatthew Dillon if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
15542545bca0SMatthew Dillon == MPT_RDF_ANNOUNCED) {
15552545bca0SMatthew Dillon mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
15562545bca0SMatthew Dillon mpt_disk->flags = 0;
15572545bca0SMatthew Dillon mpt->raid_rescan++;
15582545bca0SMatthew Dillon continue;
15592545bca0SMatthew Dillon }
15602545bca0SMatthew Dillon
15612545bca0SMatthew Dillon if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
15622545bca0SMatthew Dillon
15632545bca0SMatthew Dillon mpt_announce_disk(mpt, mpt_disk);
15642545bca0SMatthew Dillon mpt_disk->flags |= MPT_RVF_ANNOUNCED;
15652545bca0SMatthew Dillon }
15662545bca0SMatthew Dillon
15672545bca0SMatthew Dillon if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
15682545bca0SMatthew Dillon continue;
15692545bca0SMatthew Dillon
15702545bca0SMatthew Dillon mpt_disk->flags |= MPT_RDF_UP2DATE;
15712545bca0SMatthew Dillon mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
15722545bca0SMatthew Dillon if (disk_pg->PhysDiskStatus.Flags == 0)
15732545bca0SMatthew Dillon continue;
15742545bca0SMatthew Dillon
15752545bca0SMatthew Dillon mpt_disk_prt(mpt, mpt_disk, "Status (");
15762545bca0SMatthew Dillon for (m = 1; m <= 0x80; m <<= 1) {
15772545bca0SMatthew Dillon switch (disk_pg->PhysDiskStatus.Flags & m) {
15782545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
15792545bca0SMatthew Dillon mpt_prtc(mpt, " Out-Of-Sync");
15802545bca0SMatthew Dillon break;
15812545bca0SMatthew Dillon case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
15822545bca0SMatthew Dillon mpt_prtc(mpt, " Quiesced");
15832545bca0SMatthew Dillon break;
15842545bca0SMatthew Dillon default:
15852545bca0SMatthew Dillon break;
15862545bca0SMatthew Dillon }
15872545bca0SMatthew Dillon }
15882545bca0SMatthew Dillon mpt_prtc(mpt, " )\n");
15892545bca0SMatthew Dillon }
15902545bca0SMatthew Dillon
15912545bca0SMatthew Dillon mpt->raid_nonopt_volumes = nonopt_volumes;
15922545bca0SMatthew Dillon return (0);
15932545bca0SMatthew Dillon }
15942545bca0SMatthew Dillon
/* Callout handler: wake the RAID thread while holding the softc lock. */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
16052545bca0SMatthew Dillon
/*
 * Arm (or re-arm) the periodic RAID refresh timer; when it fires,
 * mpt_raid_timer() wakes the RAID thread to re-poll volume status.
 */
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
16132545bca0SMatthew Dillon
16142545bca0SMatthew Dillon void
mpt_raid_free_mem(struct mpt_softc * mpt)16152545bca0SMatthew Dillon mpt_raid_free_mem(struct mpt_softc *mpt)
16162545bca0SMatthew Dillon {
16172545bca0SMatthew Dillon
16182545bca0SMatthew Dillon if (mpt->raid_volumes) {
16192545bca0SMatthew Dillon struct mpt_raid_volume *mpt_raid;
16202545bca0SMatthew Dillon int i;
16212545bca0SMatthew Dillon for (i = 0; i < mpt->raid_max_volumes; i++) {
16222545bca0SMatthew Dillon mpt_raid = &mpt->raid_volumes[i];
16232545bca0SMatthew Dillon if (mpt_raid->config_page) {
16242545bca0SMatthew Dillon kfree(mpt_raid->config_page, M_DEVBUF);
16252545bca0SMatthew Dillon mpt_raid->config_page = NULL;
16262545bca0SMatthew Dillon }
16272545bca0SMatthew Dillon }
16282545bca0SMatthew Dillon kfree(mpt->raid_volumes, M_DEVBUF);
16292545bca0SMatthew Dillon mpt->raid_volumes = NULL;
16302545bca0SMatthew Dillon }
16312545bca0SMatthew Dillon if (mpt->raid_disks) {
16322545bca0SMatthew Dillon kfree(mpt->raid_disks, M_DEVBUF);
16332545bca0SMatthew Dillon mpt->raid_disks = NULL;
16342545bca0SMatthew Dillon }
16352545bca0SMatthew Dillon if (mpt->ioc_page2) {
16362545bca0SMatthew Dillon kfree(mpt->ioc_page2, M_DEVBUF);
16372545bca0SMatthew Dillon mpt->ioc_page2 = NULL;
16382545bca0SMatthew Dillon }
16392545bca0SMatthew Dillon if (mpt->ioc_page3) {
16402545bca0SMatthew Dillon kfree(mpt->ioc_page3, M_DEVBUF);
16412545bca0SMatthew Dillon mpt->ioc_page3 = NULL;
16422545bca0SMatthew Dillon }
16432545bca0SMatthew Dillon mpt->raid_max_volumes = 0;
16442545bca0SMatthew Dillon mpt->raid_max_disks = 0;
16452545bca0SMatthew Dillon }
16462545bca0SMatthew Dillon
16472545bca0SMatthew Dillon static int
mpt_raid_set_vol_resync_rate(struct mpt_softc * mpt,u_int rate)16482545bca0SMatthew Dillon mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
16492545bca0SMatthew Dillon {
16502545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
16512545bca0SMatthew Dillon
16522545bca0SMatthew Dillon if ((rate > MPT_RAID_RESYNC_RATE_MAX
16532545bca0SMatthew Dillon || rate < MPT_RAID_RESYNC_RATE_MIN)
16542545bca0SMatthew Dillon && rate != MPT_RAID_RESYNC_RATE_NC)
16552545bca0SMatthew Dillon return (EINVAL);
16562545bca0SMatthew Dillon
16572545bca0SMatthew Dillon MPT_LOCK(mpt);
16582545bca0SMatthew Dillon mpt->raid_resync_rate = rate;
16592545bca0SMatthew Dillon RAID_VOL_FOREACH(mpt, mpt_vol) {
16602545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
16612545bca0SMatthew Dillon continue;
16622545bca0SMatthew Dillon }
16632545bca0SMatthew Dillon mpt_verify_resync_rate(mpt, mpt_vol);
16642545bca0SMatthew Dillon }
16652545bca0SMatthew Dillon MPT_UNLOCK(mpt);
16662545bca0SMatthew Dillon return (0);
16672545bca0SMatthew Dillon }
16682545bca0SMatthew Dillon
16692545bca0SMatthew Dillon static int
mpt_raid_set_vol_queue_depth(struct mpt_softc * mpt,u_int vol_queue_depth)16702545bca0SMatthew Dillon mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
16712545bca0SMatthew Dillon {
16722545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
16732545bca0SMatthew Dillon
16742545bca0SMatthew Dillon if (vol_queue_depth > 255 || vol_queue_depth < 1)
16752545bca0SMatthew Dillon return (EINVAL);
16762545bca0SMatthew Dillon
16772545bca0SMatthew Dillon MPT_LOCK(mpt);
16782545bca0SMatthew Dillon mpt->raid_queue_depth = vol_queue_depth;
16792545bca0SMatthew Dillon RAID_VOL_FOREACH(mpt, mpt_vol) {
16802545bca0SMatthew Dillon struct cam_path *path;
16812545bca0SMatthew Dillon int error;
16822545bca0SMatthew Dillon
16832545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
16842545bca0SMatthew Dillon continue;
16852545bca0SMatthew Dillon
16862545bca0SMatthew Dillon mpt->raid_rescan = 0;
16872545bca0SMatthew Dillon
16882545bca0SMatthew Dillon error = xpt_create_path(&path, xpt_periph,
16892545bca0SMatthew Dillon cam_sim_path(mpt->sim),
16902545bca0SMatthew Dillon mpt_vol->config_page->VolumeID,
16912545bca0SMatthew Dillon /*lun*/0);
16922545bca0SMatthew Dillon if (error != CAM_REQ_CMP) {
16932545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
16942545bca0SMatthew Dillon continue;
16952545bca0SMatthew Dillon }
16962545bca0SMatthew Dillon mpt_adjust_queue_depth(mpt, mpt_vol, path);
16972545bca0SMatthew Dillon xpt_free_path(path);
16982545bca0SMatthew Dillon }
16992545bca0SMatthew Dillon MPT_UNLOCK(mpt);
17002545bca0SMatthew Dillon return (0);
17012545bca0SMatthew Dillon }
17022545bca0SMatthew Dillon
17032545bca0SMatthew Dillon static int
mpt_raid_set_vol_mwce(struct mpt_softc * mpt,mpt_raid_mwce_t mwce)17042545bca0SMatthew Dillon mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
17052545bca0SMatthew Dillon {
17062545bca0SMatthew Dillon struct mpt_raid_volume *mpt_vol;
17072545bca0SMatthew Dillon int force_full_resync;
17082545bca0SMatthew Dillon
17092545bca0SMatthew Dillon MPT_LOCK(mpt);
17102545bca0SMatthew Dillon if (mwce == mpt->raid_mwce_setting) {
17112545bca0SMatthew Dillon MPT_UNLOCK(mpt);
17122545bca0SMatthew Dillon return (0);
17132545bca0SMatthew Dillon }
17142545bca0SMatthew Dillon
17152545bca0SMatthew Dillon /*
17162545bca0SMatthew Dillon * Catch MWCE being left on due to a failed shutdown. Since
17172545bca0SMatthew Dillon * sysctls cannot be set by the loader, we treat the first
17182545bca0SMatthew Dillon * setting of this varible specially and force a full volume
17192545bca0SMatthew Dillon * resync if MWCE is enabled and a resync is in progress.
17202545bca0SMatthew Dillon */
17212545bca0SMatthew Dillon force_full_resync = 0;
17222545bca0SMatthew Dillon if (mpt->raid_mwce_set == 0
17232545bca0SMatthew Dillon && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
17242545bca0SMatthew Dillon && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
17252545bca0SMatthew Dillon force_full_resync = 1;
17262545bca0SMatthew Dillon
17272545bca0SMatthew Dillon mpt->raid_mwce_setting = mwce;
17282545bca0SMatthew Dillon RAID_VOL_FOREACH(mpt, mpt_vol) {
17292545bca0SMatthew Dillon CONFIG_PAGE_RAID_VOL_0 *vol_pg;
17302545bca0SMatthew Dillon int resyncing;
17312545bca0SMatthew Dillon int mwce;
17322545bca0SMatthew Dillon
17332545bca0SMatthew Dillon if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
17342545bca0SMatthew Dillon continue;
17352545bca0SMatthew Dillon
17362545bca0SMatthew Dillon vol_pg = mpt_vol->config_page;
17372545bca0SMatthew Dillon resyncing = vol_pg->VolumeStatus.Flags
17382545bca0SMatthew Dillon & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
17392545bca0SMatthew Dillon mwce = vol_pg->VolumeSettings.Settings
17402545bca0SMatthew Dillon & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
17412545bca0SMatthew Dillon if (force_full_resync && resyncing && mwce) {
17422545bca0SMatthew Dillon
17432545bca0SMatthew Dillon /*
17442545bca0SMatthew Dillon * XXX disable/enable volume should force a resync,
17452545bca0SMatthew Dillon * but we'll need to queice, drain, and restart
17462545bca0SMatthew Dillon * I/O to do that.
17472545bca0SMatthew Dillon */
17482545bca0SMatthew Dillon mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
17492545bca0SMatthew Dillon "detected. Suggest full resync.\n");
17502545bca0SMatthew Dillon }
17512545bca0SMatthew Dillon mpt_verify_mwce(mpt, mpt_vol);
17522545bca0SMatthew Dillon }
17532545bca0SMatthew Dillon mpt->raid_mwce_set = 1;
17542545bca0SMatthew Dillon MPT_UNLOCK(mpt);
17552545bca0SMatthew Dillon return (0);
17562545bca0SMatthew Dillon }
17574c42baf4SSascha Wildner
/*
 * Textual names for the MWCE policy.  Indexed directly by
 * mpt->raid_mwce_setting in mpt_raid_sysctl_vol_member_wce(), and the
 * matching index is passed to mpt_raid_set_vol_mwce() as an
 * mpt_raid_mwce_t -- so this order must track that enumeration.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
17652545bca0SMatthew Dillon
17662545bca0SMatthew Dillon static int
mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)17672545bca0SMatthew Dillon mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
17682545bca0SMatthew Dillon {
17692545bca0SMatthew Dillon char inbuf[20];
17702545bca0SMatthew Dillon struct mpt_softc *mpt;
17712545bca0SMatthew Dillon const char *str;
17722545bca0SMatthew Dillon int error;
17732545bca0SMatthew Dillon u_int size;
17742545bca0SMatthew Dillon u_int i;
17752545bca0SMatthew Dillon
17762545bca0SMatthew Dillon mpt = (struct mpt_softc *)arg1;
17772545bca0SMatthew Dillon str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
17782545bca0SMatthew Dillon error = SYSCTL_OUT(req, str, strlen(str) + 1);
17792545bca0SMatthew Dillon if (error || !req->newptr) {
17802545bca0SMatthew Dillon return (error);
17812545bca0SMatthew Dillon }
17822545bca0SMatthew Dillon
17832545bca0SMatthew Dillon size = req->newlen - req->newidx;
17842545bca0SMatthew Dillon if (size >= sizeof(inbuf)) {
17852545bca0SMatthew Dillon return (EINVAL);
17862545bca0SMatthew Dillon }
17872545bca0SMatthew Dillon
17882545bca0SMatthew Dillon error = SYSCTL_IN(req, inbuf, size);
17892545bca0SMatthew Dillon if (error) {
17902545bca0SMatthew Dillon return (error);
17912545bca0SMatthew Dillon }
17922545bca0SMatthew Dillon inbuf[size] = '\0';
17932545bca0SMatthew Dillon for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
17942545bca0SMatthew Dillon if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
17952545bca0SMatthew Dillon return (mpt_raid_set_vol_mwce(mpt, i));
17962545bca0SMatthew Dillon }
17972545bca0SMatthew Dillon }
17982545bca0SMatthew Dillon return (EINVAL);
17992545bca0SMatthew Dillon }
18002545bca0SMatthew Dillon
18012545bca0SMatthew Dillon static int
mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)18022545bca0SMatthew Dillon mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
18032545bca0SMatthew Dillon {
18042545bca0SMatthew Dillon struct mpt_softc *mpt;
18052545bca0SMatthew Dillon u_int raid_resync_rate;
18062545bca0SMatthew Dillon int error;
18072545bca0SMatthew Dillon
18082545bca0SMatthew Dillon mpt = (struct mpt_softc *)arg1;
18092545bca0SMatthew Dillon raid_resync_rate = mpt->raid_resync_rate;
18102545bca0SMatthew Dillon
18112545bca0SMatthew Dillon error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
18122545bca0SMatthew Dillon if (error || !req->newptr) {
18132545bca0SMatthew Dillon return error;
18142545bca0SMatthew Dillon }
18152545bca0SMatthew Dillon
18162545bca0SMatthew Dillon return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
18172545bca0SMatthew Dillon }
18182545bca0SMatthew Dillon
18192545bca0SMatthew Dillon static int
mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)18202545bca0SMatthew Dillon mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
18212545bca0SMatthew Dillon {
18222545bca0SMatthew Dillon struct mpt_softc *mpt;
18232545bca0SMatthew Dillon u_int raid_queue_depth;
18242545bca0SMatthew Dillon int error;
18252545bca0SMatthew Dillon
18262545bca0SMatthew Dillon mpt = (struct mpt_softc *)arg1;
18272545bca0SMatthew Dillon raid_queue_depth = mpt->raid_queue_depth;
18282545bca0SMatthew Dillon
18292545bca0SMatthew Dillon error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
18302545bca0SMatthew Dillon if (error || !req->newptr) {
18312545bca0SMatthew Dillon return error;
18322545bca0SMatthew Dillon }
18332545bca0SMatthew Dillon
18342545bca0SMatthew Dillon return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
18352545bca0SMatthew Dillon }
18362545bca0SMatthew Dillon
18372545bca0SMatthew Dillon static void
mpt_raid_sysctl_attach(struct mpt_softc * mpt)18382545bca0SMatthew Dillon mpt_raid_sysctl_attach(struct mpt_softc *mpt)
18392545bca0SMatthew Dillon {
184026595b18SSascha Wildner struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
184126595b18SSascha Wildner struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
184226595b18SSascha Wildner
184326595b18SSascha Wildner SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
18442545bca0SMatthew Dillon "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
18452545bca0SMatthew Dillon mpt_raid_sysctl_vol_member_wce, "A",
18462545bca0SMatthew Dillon "volume member WCE(On,Off,On-During-Rebuild,NC)");
18472545bca0SMatthew Dillon
184826595b18SSascha Wildner SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
18492545bca0SMatthew Dillon "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
18502545bca0SMatthew Dillon mpt_raid_sysctl_vol_queue_depth, "I",
18512545bca0SMatthew Dillon "default volume queue depth");
18522545bca0SMatthew Dillon
185326595b18SSascha Wildner SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
18542545bca0SMatthew Dillon "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
18552545bca0SMatthew Dillon mpt_raid_sysctl_vol_resync_rate, "I",
18562545bca0SMatthew Dillon "volume resync priority (0 == NC, 1 - 255)");
185726595b18SSascha Wildner SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
18582545bca0SMatthew Dillon "nonoptimal_volumes", CTLFLAG_RD,
18592545bca0SMatthew Dillon &mpt->raid_nonopt_volumes, 0,
18602545bca0SMatthew Dillon "number of nonoptimal volumes");
18612545bca0SMatthew Dillon }
1862