12545bca0SMatthew Dillon /*-
22545bca0SMatthew Dillon * Copyright (c) 2008 Yahoo!, Inc.
32545bca0SMatthew Dillon * All rights reserved.
42545bca0SMatthew Dillon * Written by: John Baldwin <jhb@FreeBSD.org>
52545bca0SMatthew Dillon *
62545bca0SMatthew Dillon * Redistribution and use in source and binary forms, with or without
72545bca0SMatthew Dillon * modification, are permitted provided that the following conditions
82545bca0SMatthew Dillon * are met:
92545bca0SMatthew Dillon * 1. Redistributions of source code must retain the above copyright
102545bca0SMatthew Dillon * notice, this list of conditions and the following disclaimer.
112545bca0SMatthew Dillon * 2. Redistributions in binary form must reproduce the above copyright
122545bca0SMatthew Dillon * notice, this list of conditions and the following disclaimer in the
132545bca0SMatthew Dillon * documentation and/or other materials provided with the distribution.
142545bca0SMatthew Dillon * 3. Neither the name of the author nor the names of any co-contributors
152545bca0SMatthew Dillon * may be used to endorse or promote products derived from this software
162545bca0SMatthew Dillon * without specific prior written permission.
172545bca0SMatthew Dillon *
182545bca0SMatthew Dillon * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
192545bca0SMatthew Dillon * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
202545bca0SMatthew Dillon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
212545bca0SMatthew Dillon * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
222545bca0SMatthew Dillon * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
232545bca0SMatthew Dillon * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
242545bca0SMatthew Dillon * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
252545bca0SMatthew Dillon * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
262545bca0SMatthew Dillon * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
272545bca0SMatthew Dillon * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
282545bca0SMatthew Dillon * SUCH DAMAGE.
292545bca0SMatthew Dillon *
302545bca0SMatthew Dillon * LSI MPT-Fusion Host Adapter FreeBSD userland interface
3132af04f7SSascha Wildner *
32f582582cSSascha Wildner * $FreeBSD: head/sys/dev/mpt/mpt_user.c 251187 2013-05-31 17:27:44Z delphij $
332545bca0SMatthew Dillon */
342545bca0SMatthew Dillon
352545bca0SMatthew Dillon #include <sys/param.h>
362545bca0SMatthew Dillon #include <sys/conf.h>
376d259fc1SSascha Wildner #include <sys/device.h>
382545bca0SMatthew Dillon #include <sys/errno.h>
392545bca0SMatthew Dillon #include <sys/mpt_ioctl.h>
402545bca0SMatthew Dillon
412545bca0SMatthew Dillon #include <dev/disk/mpt/mpt.h>
422545bca0SMatthew Dillon
/*
 * Result of a RAID action, copied back to userland after completion.
 * The reply handler stores one of these in the request buffer just
 * past the request frame (at MPT_RQSL(mpt) bytes into req_vbuf); see
 * mpt_user_reply_handler() and mpt_user_raid_action().
 */
struct mpt_user_raid_action_result {
	uint32_t volume_status;		/* VolumeStatus from the reply */
	uint32_t action_data[4];	/* ActionData words from the reply */
	uint16_t action_status;		/* ActionStatus from the reply */
};
482545bca0SMatthew Dillon
/*
 * A busdma-backed bounce buffer used to stage config pages and RAID
 * action data between userland and the IOC.  vaddr == NULL means
 * "not allocated"; see mpt_alloc_buffer()/mpt_free_buffer().
 */
struct mpt_page_memory {
	bus_dma_tag_t tag;	/* tag describing this allocation */
	bus_dmamap_t map;	/* load/unload map for the buffer */
	bus_addr_t paddr;	/* bus address handed to the IOC */
	void *vaddr;		/* kernel virtual address, NULL if unset */
};
552545bca0SMatthew Dillon
/* Personality hooks implemented by this file (see mpt_user_personality). */
static mpt_probe_handler_t mpt_user_probe;
static mpt_attach_handler_t mpt_user_attach;
static mpt_enable_handler_t mpt_user_enable;
static mpt_ready_handler_t mpt_user_ready;
static mpt_event_handler_t mpt_user_event;
static mpt_reset_handler_t mpt_user_reset;
static mpt_detach_handler_t mpt_user_detach;
632545bca0SMatthew Dillon
/*
 * Personality registered with the core mpt(4) driver; attaches the
 * userland ioctl interface to every controller (see mpt_user_probe()).
 */
static struct mpt_personality mpt_user_personality = {
	.name = "mpt_user",
	.probe = mpt_user_probe,
	.attach = mpt_user_attach,
	.enable = mpt_user_enable,
	.ready = mpt_user_ready,
	.event = mpt_user_event,
	.reset = mpt_user_reset,
	.detach = mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);
762545bca0SMatthew Dillon
/* Completion handler for requests submitted via this interface. */
static mpt_reply_handler_t mpt_user_reply_handler;

static d_open_t mpt_open;
static d_close_t mpt_close;
static d_ioctl_t mpt_ioctl;

/* Character device entry points for /dev/mpt%d. */
static struct dev_ops mpt_ops = {
	{ "mpt", 0, D_MPSAFE },
	.d_open = mpt_open,
	.d_close = mpt_close,
	.d_ioctl = mpt_ioctl,
};

/*
 * Reply-handler id assigned by mpt_register_handler() in
 * mpt_user_attach(); or'ed into MsgContext to route replies here.
 */
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
912545bca0SMatthew Dillon
/*
 * Personality probe hook.  The userland interface is wanted on every
 * controller, so unconditionally report a match.
 */
static int
mpt_user_probe(struct mpt_softc *mpt)
{
	return (0);
}
992545bca0SMatthew Dillon
/*
 * Personality attach hook.  Registers a reply handler for requests
 * issued on behalf of userland and creates the /dev/mpt%d character
 * device through which ioctls arrive.  Returns 0 on success or an
 * errno; on failure no state is left registered.
 */
static int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		/* Roll back the handler registration on failure. */
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	/* Let the cdev entry points find the softc. */
	mpt->cdev->si_drv1 = mpt;
	return (0);
}
1282545bca0SMatthew Dillon
/* Personality enable hook; nothing to do for the user interface. */
static int
mpt_user_enable(struct mpt_softc *mpt)
{
	return (0);
}
1352545bca0SMatthew Dillon
/* Personality ready hook; no action needed at ready time. */
static void
mpt_user_ready(struct mpt_softc *mpt)
{
}
1412545bca0SMatthew Dillon
1424c42baf4SSascha Wildner static int
mpt_user_event(struct mpt_softc * mpt,request_t * req,MSG_EVENT_NOTIFY_REPLY * msg)1432545bca0SMatthew Dillon mpt_user_event(struct mpt_softc *mpt, request_t *req,
1442545bca0SMatthew Dillon MSG_EVENT_NOTIFY_REPLY *msg)
1452545bca0SMatthew Dillon {
1462545bca0SMatthew Dillon
1472545bca0SMatthew Dillon /* Someday we may want to let a user daemon listen for events? */
1482545bca0SMatthew Dillon return (0);
1492545bca0SMatthew Dillon }
1502545bca0SMatthew Dillon
/* Personality reset hook; no per-reset state to clean up here. */
static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}
1562545bca0SMatthew Dillon
/*
 * Personality detach hook.  Tears down the character device first so
 * no new ioctls can arrive, then drops the reply-handler registration.
 */
static void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}
1712545bca0SMatthew Dillon
1722545bca0SMatthew Dillon static int
mpt_open(struct dev_open_args * ap)1732545bca0SMatthew Dillon mpt_open(struct dev_open_args *ap)
1742545bca0SMatthew Dillon {
1752545bca0SMatthew Dillon
1762545bca0SMatthew Dillon return (0);
1772545bca0SMatthew Dillon }
1782545bca0SMatthew Dillon
1792545bca0SMatthew Dillon static int
mpt_close(struct dev_close_args * ap)1802545bca0SMatthew Dillon mpt_close(struct dev_close_args *ap)
1812545bca0SMatthew Dillon {
1822545bca0SMatthew Dillon
1832545bca0SMatthew Dillon return (0);
1842545bca0SMatthew Dillon }
1852545bca0SMatthew Dillon
/*
 * Allocate a contiguous, 32-bit-addressable DMA buffer of "len" bytes
 * for staging config-page / RAID-action data.  On success the tag,
 * map, vaddr and paddr fields of *page_mem are valid; on failure
 * page_mem->vaddr is left NULL and an errno is returned.
 */
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;	/* error reported by the load callback */
	if (error) {
		/* Unwind alloc + tag; leave vaddr NULL to mark "unallocated". */
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}
2232545bca0SMatthew Dillon
2242545bca0SMatthew Dillon static void
mpt_free_buffer(struct mpt_page_memory * page_mem)2252545bca0SMatthew Dillon mpt_free_buffer(struct mpt_page_memory *page_mem)
2262545bca0SMatthew Dillon {
2272545bca0SMatthew Dillon
2282545bca0SMatthew Dillon if (page_mem->vaddr == NULL)
2292545bca0SMatthew Dillon return;
2302545bca0SMatthew Dillon bus_dmamap_unload(page_mem->tag, page_mem->map);
2312545bca0SMatthew Dillon bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
2322545bca0SMatthew Dillon bus_dma_tag_destroy(page_mem->tag);
2332545bca0SMatthew Dillon page_mem->vaddr = NULL;
2342545bca0SMatthew Dillon }
2352545bca0SMatthew Dillon
/*
 * MPTIO read-config-header backend: issue a CONFIG PAGE_HEADER action
 * for the page identified in page_req and, on IOC success, copy the
 * returned header back into page_req->header.
 *
 * Returns 0 on completion (callers must still check
 * page_req->ioc_status), ENOMEM if no request frame is available, or
 * ETIMEDOUT if the IOC did not answer within 5 seconds.
 */
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request.  Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now.  Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}
2792545bca0SMatthew Dillon
/*
 * MPTIO read-config-page backend: read the current version of the page
 * whose header is already present at the start of mpt_page->vaddr,
 * DMAing the page data into that buffer.
 *
 * Returns 0 on completion (check page_req->ioc_status), ENOMEM, or
 * ETIMEDOUT.  On timeout the request is deliberately leaked since the
 * chip still owns it.
 */
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	/* Userland pre-seeds the buffer with the header to read. */
	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
3182545bca0SMatthew Dillon
/*
 * MPTIO read-extended-config-header backend: like
 * mpt_user_read_cfg_header() but for extended pages, which carry an
 * ExtPageType/ExtPageLength pair instead of a plain PageLength.
 *
 * Returns 0 on completion (check ext_page_req->ioc_status), ENOMEM,
 * or ETIMEDOUT.
 */
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request.  Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now.  Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
3672545bca0SMatthew Dillon
/*
 * MPTIO read-extended-config-page backend: read the current version of
 * the extended page whose header is pre-seeded at the start of
 * mpt_page->vaddr, DMAing the page data into that buffer.
 *
 * Returns 0 on completion (check ext_page_req->ioc_status), ENOMEM,
 * or ETIMEDOUT (request leaked intentionally, as the chip owns it).
 */
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
4082545bca0SMatthew Dillon
/*
 * MPTIO write-config-page backend: write the page supplied in
 * mpt_page->vaddr back to the IOC, but only if its page attribute
 * marks it changeable or persistent.
 *
 * Returns 0 on completion (check page_req->ioc_status), EINVAL for a
 * read-only page, ENOMEM, or ETIMEDOUT (request leaked intentionally).
 */
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
4722545bca0SMatthew Dillon
/*
 * Reply handler for requests submitted through this interface (runs
 * from the reply/interrupt path).  For RAID action replies, the
 * action/volume status and action data are stashed past the request
 * frame (struct mpt_user_raid_action_result) for mpt_user_raid_action()
 * to copy out after wakeup.  Returns TRUE: the reply is consumed.
 */
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	/* Context-only reply with no request to complete. */
	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		/* A thread is sleeping in mpt_wait_req(); wake it. */
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
5092545bca0SMatthew Dillon
/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
/*
 * MPTIO RAID-action backend: build and submit a MPI_FUNCTION_RAID_ACTION
 * request, optionally attaching mpt_page as a single simple SGE, wait
 * up to 2 seconds for completion, and copy the result (stashed by
 * mpt_user_reply_handler()) back into raid_act.
 *
 * Returns 0 on completion (check raid_act->ioc_status), ENOMEM, or the
 * wait error on timeout (request left for late-completion cleanup).
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
    struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		/* Attach the user data buffer; direction follows ->write. */
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the reply to mpt_user_reply_handler(). */
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	/* Copy out the result the reply handler stashed after the frame. */
	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
5832545bca0SMatthew Dillon
#ifdef __x86_64__
/*
 * Pointer translation helpers for the 32-bit compat ioctl structures:
 * the *32 request structs carry user pointers as u_int32_t, so PTRIN
 * widens such a value to a native kernel pointer and PTROUT narrows a
 * native pointer back for return to a 32-bit consumer.
 */
#define PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif
5882545bca0SMatthew Dillon
/*
 * Ioctl entry point for the mpt user device.
 *
 * Dispatches the MPTIO_* requests (config-page header reads, config-page
 * read/write, extended config pages, and RAID actions) to the helper
 * routines defined above.  On x86_64, the 32-bit compat variants of each
 * ioctl are converted to the native request structures on entry and
 * converted back on successful return, so a single dispatch path serves
 * both ABIs.
 *
 * Returns 0 on success, ENOIOCTL for unrecognized commands, or an errno
 * from buffer allocation, copyin/copyout, or the underlying helper.
 */
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __x86_64__
	/* Compat shims: *32 pointers alias 'arg'; *_swab hold native copies. */
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	/* All request views alias the same ioctl data buffer. */
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	/*
	 * NULL vaddr marks "no DMA buffer allocated"; mpt_free_buffer()
	 * below is expected to treat that as a no-op, so every error path
	 * can simply break out of the switch.
	 */
	mpt_page.vaddr = NULL;

#ifdef __x86_64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		/* Redirect the native view to a stack copy widened via PTRIN. */
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __x86_64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/*
		 * Copy in only the page header; the controller fills in the
		 * body, which is copied back out in full below.
		 */
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		/* As above, but extended pages carry a larger header. */
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/* Writes need the entire page from userland, not just the header. */
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		/* The data buffer is optional for RAID actions. */
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		/*
		 * Copy the (possibly IOC-updated) buffer back even for
		 * host-to-IOC actions; raid_act->len bounds both directions.
		 */
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	/* Safe on all paths: vaddr is NULL when nothing was allocated. */
	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __x86_64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}
800