Lines Matching full:mc

68 static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
73 static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
78 static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
87 static void mlx_periodic_enquiry(struct mlx_command *mc);
89 static void mlx_periodic_eventlog_respond(struct mlx_command *mc);
90 static void mlx_periodic_rebuild(struct mlx_command *mc);
96 static void mlx_pause_done(struct mlx_command *mc);
102 void (*complete)(struct mlx_command *mc));
106 static int mlx_wait_command(struct mlx_command *mc);
107 static int mlx_poll_command(struct mlx_command *mc);
112 static void mlx_completeio(struct mlx_command *mc);
122 static void mlx_releasecmd(struct mlx_command *mc);
123 static void mlx_freecmd(struct mlx_command *mc);
128 static int mlx_getslot(struct mlx_command *mc);
129 static void mlx_setup_dmamap(struct mlx_command *mc,
132 static void mlx_unmapcmd(struct mlx_command *mc);
134 static int mlx_start(struct mlx_command *mc);
141 static char *mlx_diagnose_command(struct mlx_command *mc);
164 struct mlx_command *mc;
180 while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) {
181 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
182 mlx_freecmd(mc);
1133 mlx_periodic_enquiry(struct mlx_command *mc)
1135 struct mlx_softc *sc = mc->mc_sc;
1141 if (mc->mc_status != 0) {
1142 device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc));
1147 switch(mc->mc_mailbox[0]) {
1154 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1155 struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data;
1192 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1212 struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data;
1240 device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]);
1245 free(mc->mc_data, M_DEVBUF);
1246 mlx_releasecmd(mc);
1252 struct mlx_command *mc;
1254 mc = (struct mlx_command *)arg;
1255 mlx_setup_dmamap(mc, segs, nsegments, error);
1258 mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1,
1259 mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0);
1260 mc->mc_complete = mlx_periodic_eventlog_respond;
1261 mc->mc_private = mc;
1264 if (mlx_start(mc) != 0) {
1265 mlx_releasecmd(mc);
1266 free(mc->mc_data, M_DEVBUF);
1267 mc->mc_data = NULL;
1279 struct mlx_command *mc;
1288 if ((mc = mlx_alloccmd(sc)) == NULL)
1297 if (mlx_getslot(mc))
1301 mc->mc_data = result;
1302 mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024;
1303 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1304 mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT);
1308 if (mc != NULL)
1309 mlx_releasecmd(mc);
1310 if ((result != NULL) && (mc->mc_data != NULL))
1336 mlx_periodic_eventlog_respond(struct mlx_command *mc)
1338 struct mlx_softc *sc = mc->mc_sc;
1339 struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data;
1346 if (mc->mc_status == 0) {
1388 device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc));
1394 free(mc->mc_data, M_DEVBUF);
1395 mlx_releasecmd(mc);
1410 mlx_periodic_rebuild(struct mlx_command *mc)
1412 struct mlx_softc *sc = mc->mc_sc;
1413 struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data;
1416 switch(mc->mc_status) {
1446 free(mc->mc_data, M_DEVBUF);
1447 mlx_releasecmd(mc);
1463 struct mlx_command *mc;
1493 if ((mc = mlx_alloccmd(sc)) == NULL)
1496 mc->mc_flags |= MLX_CMD_PRIORITY;
1497 if (mlx_getslot(mc))
1501 mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0);
1502 mc->mc_complete = mlx_pause_done;
1503 mc->mc_private = sc; /* XXX not needed */
1504 if (mlx_start(mc))
1512 if (mc != NULL)
1513 mlx_releasecmd(mc);
1519 mlx_pause_done(struct mlx_command *mc)
1521 struct mlx_softc *sc = mc->mc_sc;
1522 int command = mc->mc_mailbox[0];
1523 int channel = mc->mc_mailbox[2] & 0xf;
1526 if (mc->mc_status != 0) {
1528 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", mlx_diagnose_command(mc));
1535 mlx_releasecmd(mc);
1548 struct mlx_command *mc;
1550 mc = (struct mlx_command *)arg;
1554 mlx_setup_dmamap(mc, segs, nsegments, error);
1557 sc = mc->mc_sc;
1558 mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0);
1561 if (mc->mc_complete != NULL) {
1562 if ((error = mlx_start(mc)) != 0)
1566 if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) :
1567 mlx_poll_command(mc))
1571 if (mc->mc_status != 0) {
1573 mlx_diagnose_command(mc));
1587 mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc))
1589 struct mlx_command *mc;
1599 if ((mc = mlx_alloccmd(sc)) == NULL)
1605 mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT;
1606 if (mlx_getslot(mc))
1610 mc->mc_data = result;
1611 mc->mc_length = bufsize;
1612 mc->mc_command = command;
1615 mc->mc_complete = complete;
1616 mc->mc_private = mc;
1619 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1620 mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT);
1624 if ((mc != NULL) && (mc->mc_complete == NULL))
1625 mlx_releasecmd(mc);
1644 struct mlx_command *mc;
1652 if ((mc = mlx_alloccmd(sc)) == NULL)
1655 if (mlx_getslot(mc))
1659 mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0);
1662 if (mlx_poll_command(mc))
1666 if (mc->mc_status != 0) {
1667 device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc));
1673 if (mc != NULL)
1674 mlx_releasecmd(mc);
1687 struct mlx_command *mc;
1695 if ((mc = mlx_alloccmd(sc)) == NULL)
1698 if (mlx_getslot(mc))
1702 mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0);
1705 if (mlx_wait_command(mc))
1709 if (mc->mc_status != 0) {
1710 device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc));
1714 error = mc->mc_status;
1717 if (mc != NULL)
1718 mlx_releasecmd(mc);
1731 struct mlx_command *mc;
1739 if ((mc = mlx_alloccmd(sc)) == NULL)
1742 if (mlx_getslot(mc))
1746 mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0);
1749 if (mlx_wait_command(mc))
1753 if (mc->mc_status != 0) {
1754 device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc));
1758 error = mc->mc_status;
1761 if (mc != NULL)
1762 mlx_releasecmd(mc);
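
The flush/check/rebuild matches above all follow the same synchronous shape: allocate a command, claim a slot, build a type-2 mailbox, run it to completion, translate the controller status, release. A condensed, illustrative sketch of that pattern follows; the helper name and the intermediate error values are invented here, while the mlx_* calls are the driver's own as listed above.

static int
mlx_sync_type2_example(struct mlx_softc *sc, u_int8_t cmd, u_int8_t arg1)
{
    struct mlx_command	*mc;
    int			error;

    error = ENOMEM;
    /* get a command buffer and a controller slot for it */
    if ((mc = mlx_alloccmd(sc)) == NULL)
	goto out;
    if (mlx_getslot(mc))
	goto out;

    /* build a type-2 mailbox and run the command to completion */
    mlx_make_type2(mc, cmd, arg1, 0, 0, 0, 0, 0, 0, 0);
    error = EIO;
    if (mlx_wait_command(mc))
	goto out;

    /* report and propagate the controller's status */
    if (mc->mc_status != 0)
	device_printf(sc->mlx_dev, "command 0x%x failed - %s\n",
		      cmd, mlx_diagnose_command(mc));
    error = mc->mc_status;

 out:
    if (mc != NULL)
	mlx_releasecmd(mc);
    return(error);
}
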
1767 * Run the command (mc) and return when it completes.
1772 mlx_wait_command(struct mlx_command *mc)
1774 struct mlx_softc *sc = mc->mc_sc;
1780 mc->mc_complete = NULL;
1781 mc->mc_private = mc; /* wake us when you're done */
1782 if ((error = mlx_start(mc)) != 0)
1787 while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) {
1788 mtx_sleep(mc->mc_private, &sc->mlx_io_lock, PRIBIO | PCATCH, "mlxwcmd", hz);
1791 if (mc->mc_status != 0) {
1792 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
1800 * Start the command (mc) and busy-wait for it to complete.
1807 mlx_poll_command(struct mlx_command *mc)
1809 struct mlx_softc *sc = mc->mc_sc;
1815 mc->mc_complete = NULL;
1816 mc->mc_private = NULL; /* we will poll for it */
1817 if ((error = mlx_start(mc)) != 0)
1823 mlx_done(mc->mc_sc, 1);
1825 } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000));
1826 if (mc->mc_status != MLX_STATUS_BUSY) {
1827 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
1830 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
1837 struct mlx_command *mc;
1845 mc = (struct mlx_command *)arg;
1846 mlx_setup_dmamap(mc, segs, nsegments, error);
1848 sc = mc->mc_sc;
1849 bp = mc->mc_private;
1852 mc->mc_flags |= MLX_CMD_DATAIN;
1855 mc->mc_flags |= MLX_CMD_DATAOUT;
1875 mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD :
1880 mc->mc_sgphys, /* location of SG list */
1881 mc->mc_nsgent & 0x3f); /* size of SG list */
1883 mlx_make_type5(mc, cmd,
1888 mc->mc_sgphys, /* location of SG list */
1889 mc->mc_nsgent & 0x3f); /* size of SG list */
1893 if (mlx_start(mc) != 0) {
1895 mc->mc_status = MLX_STATUS_WEDGED;
1896 mlx_completeio(mc);
1909 struct mlx_command *mc;
1924 if ((mc = mlx_alloccmd(sc)) == NULL)
1927 if (mlx_getslot(mc) != 0) {
1928 mlx_releasecmd(mc);
1936 mc->mc_complete = mlx_completeio;
1937 mc->mc_private = bp;
1938 mc->mc_data = bp->bio_data;
1939 mc->mc_length = bp->bio_bcount;
1942 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1943 mc->mc_length, mlx_startio_cb, mc, 0);
1955 mlx_completeio(struct mlx_command *mc)
1957 struct mlx_softc *sc = mc->mc_sc;
1958 struct bio *bp = mc->mc_private;
1962 if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */
1966 switch(mc->mc_status) {
1974 device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc));
1978 device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " ");
1983 mlx_releasecmd(mc);
1991 struct mlx_command *mc;
1994 mc = (struct mlx_command *)arg;
1998 mlx_setup_dmamap(mc, segs, nsegments, error);
2000 mu = (struct mlx_usercommand *)mc->mc_private;
2009 if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) {
2010 dcdb = (struct mlx_dcdb *)mc->mc_data;
2011 dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb);
2019 mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff;
2020 mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff;
2021 mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff;
2022 mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff;
2027 if (mlx_wait_command(mc) != 0)
2042 struct mlx_command *mc;
2049 mc = NULL;
2054 if ((mc = mlx_alloccmd(sc)) == NULL) {
2058 bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox));
2081 if (mlx_getslot(mc))
2096 mc->mc_data = kbuf;
2097 mc->mc_length = mu->mu_datasize;
2098 mc->mc_private = mu;
2099 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
2100 mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT);
2105 mu->mu_status = mc->mc_status;
2113 mlx_releasecmd(mc);
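
The mlx_user_cb matches above patch the 32-bit bus address of the user data buffer into the command mailbox byte-by-byte, little-endian, at the offset the caller supplied in mu_bufptr. A minimal sketch of just that step; the helper name is illustrative.

static void
mlx_patch_bufptr_example(u_int8_t *mailbox, int bufptr, u_int32_t dataphys)
{
    /* scatter the 32-bit bus address into four consecutive mailbox bytes */
    mailbox[bufptr]     = dataphys & 0xff;
    mailbox[bufptr + 1] = (dataphys >> 8) & 0xff;
    mailbox[bufptr + 2] = (dataphys >> 16) & 0xff;
    mailbox[bufptr + 3] = (dataphys >> 24) & 0xff;
}
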
2127 * Find a free command slot for (mc).
2133 mlx_getslot(struct mlx_command *mc)
2135 struct mlx_softc *sc = mc->mc_sc;
2150 if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4))
2164 sc->mlx_busycmd[slot] = mc;
2173 mc->mc_slot = slot;
2178 * Map/unmap (mc)'s data in the controller's addressable space.
2181 mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments,
2184 struct mlx_softc *sc = mc->mc_sc;
2196 sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG);
2199 mc->mc_nsgent = nsegments;
2200 mc->mc_sgphys = sc->mlx_sgbusaddr +
2201 (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry));
2202 mc->mc_dataphys = segs[0].ds_addr;
2211 if (mc->mc_flags & MLX_CMD_DATAIN)
2212 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap,
2214 if (mc->mc_flags & MLX_CMD_DATAOUT)
2215 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap,
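
mlx_setup_dmamap() is only reached through the busdma deferred-callback idiom visible throughout the matches (mlx_eventlog_cb, mlx_enquire_cb, mlx_startio_cb, mlx_user_cb): bus_dmamap_load() maps the buffer and hands the physical segments to a callback, which records them and then finishes building and starting the command. A hedged sketch of that idiom; the example_* names are placeholders, the call signatures are the ones shown above.

static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct mlx_command	*mc = (struct mlx_command *)arg;

    /* record the segment addresses in the command's per-slot S/G table */
    mlx_setup_dmamap(mc, segs, nsegments, error);

    /* ... build the mailbox here and hand the command to mlx_start(mc) ... */
}

static int
example_map_and_start(struct mlx_softc *sc, struct mlx_command *mc)
{
    /* map mc->mc_data; the segments are delivered to the callback above */
    return(bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
			   mc->mc_length, example_load_cb, mc, BUS_DMA_NOWAIT));
}
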
2220 mlx_unmapcmd(struct mlx_command *mc)
2222 struct mlx_softc *sc = mc->mc_sc;
2227 if (mc->mc_data != NULL) {
2229 if (mc->mc_flags & MLX_CMD_DATAIN)
2230 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD);
2231 if (mc->mc_flags & MLX_CMD_DATAOUT)
2232 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE);
2234 bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap);
2239 * Try to deliver (mc) to the controller.
2244 mlx_start(struct mlx_command *mc)
2246 struct mlx_softc *sc = mc->mc_sc;
2252 mc->mc_mailbox[0x1] = mc->mc_slot;
2255 mc->mc_status = MLX_STATUS_BUSY;
2258 mc->mc_timeout = time_second + 60;
2262 if (sc->mlx_tryqueue(sc, mc)) {
2264 TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link);
2274 sc->mlx_busycmd[mc->mc_slot] = NULL;
2276 mc->mc_status = MLX_STATUS_WEDGED;
2291 struct mlx_command *mc;
2306 mc = sc->mlx_busycmd[slot]; /* find command */
2307 if (mc != NULL) { /* paranoia */
2308 if (mc->mc_status == MLX_STATUS_BUSY) {
2309 mc->mc_status = status; /* save status */
2341 struct mlx_command *mc, *nc;
2347 mc = TAILQ_FIRST(&sc->mlx_work);
2348 while (mc != NULL) {
2349 nc = TAILQ_NEXT(mc, mc_link);
2352 if (mc->mc_status != MLX_STATUS_BUSY) {
2355 mlx_unmapcmd(mc);
2359 if (mc->mc_complete != NULL) {
2361 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
2362 mc->mc_complete(mc);
2367 } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */
2370 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
2371 wakeup_one(mc->mc_private);
2379 mc = nc;
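
The mlx_complete() matches above show the two completion paths: a command with an mc_complete callback is pulled off the work queue and handed to it, while a synchronous command (callback NULL, mc_private set) has its sleeping caller woken instead. A condensed sketch of that dispatch, with the surrounding list walk and locking omitted; the function name is illustrative.

static void
example_complete_one(struct mlx_softc *sc, struct mlx_command *mc)
{
    if (mc->mc_complete != NULL) {
	/* asynchronous command: run its completion handler */
	TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
	mc->mc_complete(mc);
    } else if (mc->mc_private != NULL) {
	/* synchronous command: wake the caller sleeping on mc_private */
	TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
	wakeup_one(mc->mc_private);
    }
}
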
2406 struct mlx_command *mc;
2412 if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL)
2413 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
2416 if (mc == NULL) {
2417 mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO);
2418 if (mc != NULL) {
2419 mc->mc_sc = sc;
2420 error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap);
2422 free(mc, M_DEVBUF);
2427 return(mc);
2437 mlx_releasecmd(struct mlx_command *mc)
2442 MLX_IO_ASSERT_LOCKED(mc->mc_sc);
2443 TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link);
2450 mlx_freecmd(struct mlx_command *mc)
2452 struct mlx_softc *sc = mc->mc_sc;
2455 bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap);
2456 free(mc, M_DEVBUF);
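
mlx_alloccmd()/mlx_releasecmd() above implement a simple command cache: released commands go back on the mlx_freecmds TAILQ and are reused before anything new is malloc'd. A condensed sketch of the allocation side; the function name is illustrative and error handling is abbreviated.

static struct mlx_command *
example_alloccmd(struct mlx_softc *sc)
{
    struct mlx_command	*mc;

    /* fast path: recycle a command from the free list */
    if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) {
	TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
	return(mc);
    }

    /* slow path: allocate a fresh command and its DMA map */
    mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (mc == NULL)
	return(NULL);
    mc->mc_sc = sc;
    if (bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap) != 0) {
	free(mc, M_DEVBUF);
	return(NULL);
    }
    return(mc);
}
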
2467 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2471 mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2482 MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
2582 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2586 mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2597 MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
2701 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2705 mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2716 MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
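
The three tryqueue variants above differ only in the mailbox-access macro (MLX_V3/V4/V5_PUT_MAILBOX). Their shared shape, per the surrounding comments, is: if the controller's mailbox is free, copy the command mailbox out and signal submission, returning 1 on success and 0 when the controller is busy. In the sketch below the idle test, the doorbell write, and the byte count are placeholders (EXAMPLE_*); only the PUT_MAILBOX loop comes from the matches.

static int
example_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
{
    int		i;

    if (!EXAMPLE_MAILBOX_IDLE(sc))		/* placeholder idle test */
	return(0);				/* controller busy, caller retries */

    /* copy the command mailbox to the controller's mailbox window */
    for (i = 0; i < (int)sizeof(mc->mc_mailbox); i++)	/* byte count is a placeholder */
	MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);

    EXAMPLE_SUBMIT_DOORBELL(sc);		/* placeholder "command submitted" strobe */
    return(1);
}
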
2816 * Return a status message describing (mc)
2885 mlx_diagnose_command(struct mlx_command *mc)
2892 if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) &&
2893 (mc->mc_status == mlx_messages[i].status))
2896 sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]);
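
mlx_diagnose_command() above is a table lookup: scan mlx_messages[] for an entry whose command matches (0 acting as a wildcard) and whose status matches, otherwise format an "unknown response" string. A sketch of that shape; the table type, its text field, and this function's signature are assumptions, not the driver's actual declarations.

/* assumed table layout; the real mlx_messages[] declaration is not in the matches */
struct example_mlx_message {
    u_int8_t	command;	/* 0 acts as a wildcard */
    u_int8_t	status;
    const char	*text;
};

static const char *
example_diagnose(struct mlx_command *mc, const struct example_mlx_message *tab,
		 int count, char *unkmsg, size_t len)
{
    int		i;

    for (i = 0; i < count; i++)
	if (((mc->mc_mailbox[0] == tab[i].command) || (tab[i].command == 0)) &&
	    (mc->mc_status == tab[i].status))
	    return(tab[i].text);

    /* no match: format a generic description of the response */
    snprintf(unkmsg, len, "unknown response 0x%x for command 0x%x",
	     (int)mc->mc_status, (int)mc->mc_mailbox[0]);
    return(unkmsg);
}
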