1 /* $NetBSD: mpt_netbsd.c,v 1.40 2024/02/09 22:08:34 andvar Exp $ */
2
3 /*
4 * Copyright (c) 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 2000, 2001 by Greg Ansley
40 * Partially derived from Matt Jacob's ISP driver.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice immediately at the beginning of the file, without modification,
47 * this list of conditions, and the following disclaimer.
48 * 2. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
55 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 */
63 /*
64 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
65 */
66
67 /*
68 * mpt_netbsd.c:
69 *
70 * NetBSD-specific routines for LSI Fusion adapters. Includes some
71 * bus_dma glue, and SCSIPI glue.
72 *
73 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
74 * Wasabi Systems, Inc.
75 *
76 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.40 2024/02/09 22:08:34 andvar Exp $");
81
82 #include "bio.h"
83
84 #include <dev/ic/mpt.h> /* pulls in all headers */
85 #include <sys/scsiio.h>
86
87 #if NBIO > 0
88 #include <dev/biovar.h>
89 #endif
90
91 static int mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
92 static void mpt_timeout(void *);
93 static void mpt_restart(mpt_softc_t *, request_t *);
94 static void mpt_done(mpt_softc_t *, uint32_t);
95 static int mpt_drain_queue(mpt_softc_t *);
96 static void mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
97 static void mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
98 static void mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
99 static void mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
100 static void mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
101 static void mpt_bus_reset(mpt_softc_t *);
102
103 static void mpt_scsipi_request(struct scsipi_channel *,
104 scsipi_adapter_req_t, void *);
105 static void mpt_minphys(struct buf *);
106 static int mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
107 struct proc *);
108
109 #if NBIO > 0
110 static bool mpt_is_raid(mpt_softc_t *);
111 static int mpt_bio_ioctl(device_t, u_long, void *);
112 static int mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
113 static int mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
114 static int mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
115 static int mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *);
116 #endif
117
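/*
 * mpt_scsipi_attach:
 *
 *	Fill in and register the scsipi adapter and channel for this IOC,
 *	attach the child SCSI bus via config_found(), and register with
 *	bio(4) if the controller reports Integrated RAID capability.
 */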
118 void
119 mpt_scsipi_attach(mpt_softc_t *mpt)
120 {
121 struct scsipi_adapter *adapt = &mpt->sc_adapter;
122 struct scsipi_channel *chan = &mpt->sc_channel;
123 int maxq;
124
125 mpt->bus = 0; /* XXX ?? */
126
127 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
128 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
129
130 /* Fill in the scsipi_adapter. */
131 memset(adapt, 0, sizeof(*adapt));
132 adapt->adapt_dev = mpt->sc_dev;
133 adapt->adapt_nchannels = 1;
134 adapt->adapt_openings = maxq - 2; /* Reserve 2 for driver use */
135 adapt->adapt_max_periph = maxq - 2;
136 adapt->adapt_request = mpt_scsipi_request;
137 adapt->adapt_minphys = mpt_minphys;
138 adapt->adapt_ioctl = mpt_ioctl;
139
140 /* Fill in the scsipi_channel. */
141 memset(chan, 0, sizeof(*chan));
142 chan->chan_adapter = adapt;
143 if (mpt->is_sas) {
144 chan->chan_bustype = &scsi_sas_bustype;
145 } else if (mpt->is_fc) {
146 chan->chan_bustype = &scsi_fc_bustype;
147 } else {
148 chan->chan_bustype = &scsi_bustype;
149 }
150 chan->chan_channel = 0;
151 chan->chan_flags = 0;
152 chan->chan_nluns = 8;
153 chan->chan_ntargets = mpt->mpt_max_devices ? mpt->mpt_max_devices : 256;
154 chan->chan_id = mpt->mpt_ini_id;
155
156 /*
157 * Save the child device returned by config_found() so we can rescan
158 * the bus in case of errors.
159 */
160 mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
161 scsiprint, CFARGS_NONE);
162
163 #if NBIO > 0
164 if (mpt_is_raid(mpt)) {
165 if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
166 panic("%s: controller registration failed",
167 device_xname(mpt->sc_dev));
168 }
169 #endif
170 }
171
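/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the memory shared with the IOC: a page of reply frames
 *	and a contiguous request area carved into MPT_REQUEST_AREA-sized
 *	slots, one per request_t.  The layout of each slot is roughly:
 *
 *		[ request frame + SGL ... | sense data ]
 *		^ req_pbuf                  ^ sense_pbuf (last MPT_SENSE_SIZE bytes)
 */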
172 int
173 mpt_dma_mem_alloc(mpt_softc_t *mpt)
174 {
175 bus_dma_segment_t reply_seg, request_seg;
176 int reply_rseg, request_rseg;
177 bus_addr_t pptr, end;
178 char *vptr;
179 size_t len;
180 int error, i;
181
182 /* Check if we have already allocated the reply memory. */
183 if (mpt->reply != NULL)
184 return (0);
185
186 /*
187 * Allocate the request pool. This isn't really DMA'd memory,
188 * but it's a convenient place to do it.
189 */
190 len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
191 mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
192 if (mpt->request_pool == NULL) {
193 aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
194 return (ENOMEM);
195 }
196
197 /*
198 * Allocate DMA resources for reply buffers.
199 */
200 error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
201 &reply_seg, 1, &reply_rseg, 0);
202 if (error) {
203 aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
204 error);
205 goto fail_0;
206 }
207
208 error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
209 (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
210 if (error) {
211 aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
212 error);
213 goto fail_1;
214 }
215
216 error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
217 0, 0, &mpt->reply_dmap);
218 if (error) {
219 aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
220 error);
221 goto fail_2;
222 }
223
224 error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
225 PAGE_SIZE, NULL, 0);
226 if (error) {
227 aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
228 error);
229 goto fail_3;
230 }
231 mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;
232
233 /*
234 * Allocate DMA resources for request buffers.
235 */
236 error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
237 PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
238 if (error) {
239 aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
240 "error = %d\n", error);
241 goto fail_4;
242 }
243
244 error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
245 MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
246 if (error) {
247 aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
248 error);
249 goto fail_5;
250 }
251
252 error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
253 MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
254 if (error) {
255 aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
256 "error = %d\n", error);
257 goto fail_6;
258 }
259
260 error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
261 MPT_REQ_MEM_SIZE(mpt), NULL, 0);
262 if (error) {
263 aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
264 error);
265 goto fail_7;
266 }
267 mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;
268
269 pptr = mpt->request_phys;
270 vptr = (void *) mpt->request;
271 end = pptr + MPT_REQ_MEM_SIZE(mpt);
272
273 for (i = 0; pptr < end; i++) {
274 request_t *req = &mpt->request_pool[i];
275 req->index = i;
276
277 /* Store location of Request Data */
278 req->req_pbuf = pptr;
279 req->req_vbuf = vptr;
280
281 pptr += MPT_REQUEST_AREA;
282 vptr += MPT_REQUEST_AREA;
283
284 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
285 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
286
287 error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
288 MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
289 if (error) {
290 aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
291 "error = %d\n", i, error);
292 goto fail_8;
293 }
294 }
295
296 return (0);
297
298 fail_8:
299 for (--i; i >= 0; i--) {
300 request_t *req = &mpt->request_pool[i];
301 if (req->dmap != NULL)
302 bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
303 }
304 bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
305 fail_7:
306 bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
307 fail_6:
308 bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, MPT_REQ_MEM_SIZE(mpt));
309 fail_5:
310 bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
311 fail_4:
312 bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
313 fail_3:
314 bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
315 fail_2:
316 bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
317 fail_1:
318 bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
319 fail_0:
320 free(mpt->request_pool, M_DEVBUF);
321
322 mpt->reply = NULL;
323 mpt->request = NULL;
324 mpt->request_pool = NULL;
325
326 return (error);
327 }
328
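/*
 * mpt_intr:
 *
 *	Interrupt handler.  Returns non-zero (interrupt was ours) only if
 *	at least one reply was pulled off the IOC's reply queue.
 */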
329 int
330 mpt_intr(void *arg)
331 {
332 mpt_softc_t *mpt = arg;
333 int nrepl = 0;
334
335 if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
336 return (0);
337
338 nrepl = mpt_drain_queue(mpt);
339 return (nrepl != 0);
340 }
341
342 void
343 mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
344 {
345 va_list ap;
346
347 printf("%s: ", device_xname(mpt->sc_dev));
348 va_start(ap, fmt);
349 vprintf(fmt, ap);
350 va_end(ap);
351 printf("\n");
352 }
353
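/*
 * mpt_poll:
 *
 *	Poll for command completion when interrupts are unavailable
 *	(e.g. during autoconfiguration).  'count' is the timeout in
 *	milliseconds; returns 0 on completion, 1 on timeout.
 */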
354 static int
355 mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
356 {
357
358 /* Timeouts are in msec, so we loop in 1000usec cycles */
359 while (count) {
360 mpt_intr(mpt);
361 if (xs->xs_status & XS_STS_DONE)
362 return (0);
363 delay(1000); /* only happens in boot, so ok */
364 count--;
365 }
366 return (1);
367 }
368
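/*
 * mpt_timeout:
 *
 *	Command timeout handler.  First check whether the command really
 *	completed (a missed or still-queued interrupt); only if the
 *	request is still outstanding do we declare a timeout and restart
 *	the IOC.
 */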
369 static void
370 mpt_timeout(void *arg)
371 {
372 request_t *req = arg;
373 struct scsipi_xfer *xs;
374 struct scsipi_periph *periph;
375 mpt_softc_t *mpt;
376 uint32_t oseq;
377 int s, nrepl = 0;
378
379 if (req->xfer == NULL) {
380 printf("mpt_timeout: NULL xfer for request index 0x%x, sequence 0x%x\n",
381 req->index, req->sequence);
382 return;
383 }
384 xs = req->xfer;
385 periph = xs->xs_periph;
386 mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
387 scsipi_printaddr(periph);
388 printf("command timeout\n");
389
390 s = splbio();
391
392 oseq = req->sequence;
393 mpt->timeouts++;
394 if (mpt_intr(mpt)) {
395 if (req->sequence != oseq) {
396 mpt->success++;
397 mpt_prt(mpt, "recovered from command timeout");
398 splx(s);
399 return;
400 }
401 }
402
403 /*
404 * Ensure the IOC is really done giving us data since it appears it can
405 * sometimes fail to give us interrupts under heavy load.
406 */
407 nrepl = mpt_drain_queue(mpt);
408 if (nrepl) {
409 mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
410 }
411
412 if (req->sequence != oseq) {
413 mpt->success++;
414 splx(s);
415 return;
416 }
417
418 mpt_prt(mpt,
419 "timeout on request index = 0x%x, seq = 0x%08x",
420 req->index, req->sequence);
421 mpt_check_doorbell(mpt);
422 mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
423 mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
424 mpt_read(mpt, MPT_OFFSET_INTR_MASK),
425 mpt_read(mpt, MPT_OFFSET_DOORBELL));
426 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
427 if (mpt->verbose > 1)
428 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
429
430 xs->error = XS_TIMEOUT;
431 splx(s);
432 mpt_restart(mpt, req);
433 }
434
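/*
 * mpt_restart:
 *
 *	Recover from a wedged IOC: soft-reset it, hand every pending
 *	scsipi_xfer back to scsipi (requeueing all but the request that
 *	faulted), then re-initialize the IOC and thaw the channel.
 */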
435 static void
436 mpt_restart(mpt_softc_t *mpt, request_t *req0)
437 {
438 int i, s, nreq;
439 request_t *req;
440 struct scsipi_xfer *xs;
441
442 /* first, reset the IOC, leaving stopped so all requests are idle */
443 if (mpt_soft_reset(mpt) != MPT_OK) {
444 mpt_prt(mpt, "soft reset failed");
445 /*
446 * Don't try a hard reset since this mangles the PCI
447 * configuration registers.
448 */
449 return;
450 }
451
452 /* Freeze the channel so scsipi doesn't queue more commands. */
453 scsipi_channel_freeze(&mpt->sc_channel, 1);
454
455 /* Return all pending requests to scsipi and de-allocate them. */
456 s = splbio();
457 nreq = 0;
458 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
459 req = &mpt->request_pool[i];
460 xs = req->xfer;
461 if (xs != NULL) {
462 if (xs->datalen != 0)
463 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
464 req->xfer = NULL;
465 callout_stop(&xs->xs_callout);
466 if (req != req0) {
467 nreq++;
468 xs->error = XS_REQUEUE;
469 }
470 scsipi_done(xs);
471 /*
472 * Don't need to mpt_free_request() since mpt_init()
473 * below will free all requests anyway.
474 */
475 mpt_free_request(mpt, req);
476 }
477 }
478 splx(s);
479 if (nreq > 0)
480 mpt_prt(mpt, "re-queued %d requests", nreq);
481
482 /* Re-initialize the IOC (which restarts it). */
483 if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
484 mpt_prt(mpt, "restart succeeded");
485 /* else error message already printed */
486
487 /* Thaw the channel, causing scsipi to re-queue the commands. */
488 scsipi_channel_thaw(&mpt->sc_channel, 1);
489 }
490
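/*
 * mpt_drain_queue:
 *
 *	Pull replies off the IOC's reply FIFO until it is empty, passing
 *	each one to mpt_done().  Returns the number of replies processed.
 */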
491 static int
492 mpt_drain_queue(mpt_softc_t *mpt)
493 {
494 int nrepl = 0;
495 uint32_t reply;
496
497 reply = mpt_pop_reply_queue(mpt);
498 while (reply != MPT_REPLY_EMPTY) {
499 nrepl++;
500 if (mpt->verbose > 1) {
501 if ((reply & MPT_CONTEXT_REPLY) != 0) {
502 /* Address reply; IOC has something to say */
503 mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
504 } else {
505 /* Context reply; all went well */
506 mpt_prt(mpt, "context %u reply OK", reply);
507 }
508 }
509 mpt_done(mpt, reply);
510 reply = mpt_pop_reply_queue(mpt);
511 }
512 return (nrepl);
513 }
514
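/*
 * mpt_done:
 *
 *	Process a single reply.  A "context" reply carries only the request
 *	index and means the command completed successfully; an "address"
 *	reply points at a full reply frame in the reply area and is used
 *	for errors and control operations.
 */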
515 static void
516 mpt_done(mpt_softc_t *mpt, uint32_t reply)
517 {
518 struct scsipi_xfer *xs = NULL;
519 struct scsipi_periph *periph;
520 int index;
521 request_t *req;
522 MSG_REQUEST_HEADER *mpt_req;
523 MSG_SCSI_IO_REPLY *mpt_reply;
524 int restart = 0; /* nonzero if we need to restart the IOC */
525
526 if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
527 /* context reply (ok) */
528 mpt_reply = NULL;
529 index = reply & MPT_CONTEXT_MASK;
530 } else {
531 /* address reply (error) */
532
533 /* XXX BUS_DMASYNC_POSTREAD XXX */
534 mpt_reply = MPT_REPLY_PTOV(mpt, reply);
535 if (mpt_reply != NULL) {
536 if (mpt->verbose > 1) {
537 uint32_t *pReply = (uint32_t *) mpt_reply;
538
539 mpt_prt(mpt, "Address Reply (index %u):",
540 le32toh(mpt_reply->MsgContext) & 0xffff);
541 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
542 pReply[1], pReply[2], pReply[3]);
543 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
544 pReply[5], pReply[6], pReply[7]);
545 mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
546 pReply[9], pReply[10], pReply[11]);
547 }
548 index = le32toh(mpt_reply->MsgContext);
549 } else
550 index = reply & MPT_CONTEXT_MASK;
551 }
552
553 /*
554 * Address reply with MessageContext high bit set.
555 * This is most likely a notify message, so we try
556 * to process it, then free it.
557 */
558 if (__predict_false((index & 0x80000000) != 0)) {
559 if (mpt_reply != NULL)
560 mpt_ctlop(mpt, mpt_reply, reply);
561 else
562 mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
563 index);
564 return;
565 }
566
567 /* Did we end up with a valid index into the table? */
568 if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
569 mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
570 index);
571 return;
572 }
573
574 req = &mpt->request_pool[index];
575
576 /* Make sure memory hasn't been trashed. */
577 if (__predict_false(req->index != index)) {
578 mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
579 index);
580 return;
581 }
582
583 MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
584 mpt_req = req->req_vbuf;
585
586 /* Short cut for task management replies; nothing more for us to do. */
587 if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
588 if (mpt->verbose > 1)
589 mpt_prt(mpt, "%s: TASK MGMT", __func__);
590 KASSERT(req == mpt->mngt_req);
591 mpt->mngt_req = NULL;
592 goto done;
593 }
594
595 if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
596 goto done;
597
598 /*
599 * At this point, it had better be a SCSI I/O command, but don't
600 * crash if it isn't.
601 */
602 if (__predict_false(mpt_req->Function !=
603 MPI_FUNCTION_SCSI_IO_REQUEST)) {
604 if (mpt->verbose > 1)
605 mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
606 __func__, mpt_req->Function, index);
607 goto done;
608 }
609
610 /* Recover scsipi_xfer from the request structure. */
611 xs = req->xfer;
612
613 /* Can't have a SCSI command without a scsipi_xfer. */
614 if (__predict_false(xs == NULL)) {
615 mpt_prt(mpt,
616 "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
617 req->index, req->sequence);
618 mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
619 mpt_prt(mpt, "mpt_request:");
620 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
621
622 if (mpt_reply != NULL) {
623 mpt_prt(mpt, "mpt_reply:");
624 mpt_print_reply(mpt_reply);
625 } else {
626 mpt_prt(mpt, "context reply: 0x%08x", reply);
627 }
628 goto done;
629 }
630
631 callout_stop(&xs->xs_callout);
632
633 periph = xs->xs_periph;
634
635 /*
636 * If we were a data transfer, unload the map that described
637 * the data buffer.
638 */
639 if (__predict_true(xs->datalen != 0)) {
640 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
641 req->dmap->dm_mapsize,
642 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
643 : BUS_DMASYNC_POSTWRITE);
644 bus_dmamap_unload(mpt->sc_dmat, req->dmap);
645 }
646
647 if (__predict_true(mpt_reply == NULL)) {
648 /*
649 * Context reply; report that the command was
650 * successful!
651 *
652 * Also report the xfer mode, if necessary.
653 */
654 if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
655 if ((mpt->mpt_report_xfer_mode &
656 (1 << periph->periph_target)) != 0)
657 mpt_get_xfer_mode(mpt, periph);
658 }
659 xs->error = XS_NOERROR;
660 xs->status = SCSI_OK;
661 xs->resid = 0;
662 mpt_free_request(mpt, req);
663 scsipi_done(xs);
664 return;
665 }
666
667 xs->status = mpt_reply->SCSIStatus;
668 switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
669 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
670 xs->error = XS_DRIVER_STUFFUP;
671 mpt_prt(mpt, "%s: IOC overrun!", __func__);
672 break;
673
674 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
675 /*
676 * Yikes! Tagged queue full comes through this path!
677 *
678 * So we'll change it to a status error and anything
679 * that returns status should probably be a status
680 * error as well.
681 */
682 xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
683 if (mpt_reply->SCSIState &
684 MPI_SCSI_STATE_NO_SCSI_STATUS) {
685 xs->error = XS_DRIVER_STUFFUP;
686 break;
687 }
688 /* FALLTHROUGH */
689 case MPI_IOCSTATUS_SUCCESS:
690 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
691 switch (xs->status) {
692 case SCSI_OK:
693 /* Report the xfer mode, if necessary. */
694 if ((mpt->mpt_report_xfer_mode &
695 (1 << periph->periph_target)) != 0)
696 mpt_get_xfer_mode(mpt, periph);
697 xs->resid = 0;
698 break;
699
700 case SCSI_CHECK:
701 xs->error = XS_SENSE;
702 break;
703
704 case SCSI_BUSY:
705 case SCSI_QUEUE_FULL:
706 xs->error = XS_BUSY;
707 break;
708
709 default:
710 scsipi_printaddr(periph);
711 printf("invalid status code %d\n", xs->status);
712 xs->error = XS_DRIVER_STUFFUP;
713 break;
714 }
715 break;
716
717 case MPI_IOCSTATUS_BUSY:
718 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
719 xs->error = XS_RESOURCE_SHORTAGE;
720 break;
721
722 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
723 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
724 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
725 xs->error = XS_SELTIMEOUT;
726 break;
727
728 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
729 xs->error = XS_DRIVER_STUFFUP;
730 mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
731 restart = 1;
732 break;
733
734 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
735 /* XXX What should we do here? */
736 mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
737 restart = 1;
738 break;
739
740 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
741 /* XXX */
742 xs->error = XS_DRIVER_STUFFUP;
743 mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
744 restart = 1;
745 break;
746
747 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
748 /* XXX */
749 xs->error = XS_DRIVER_STUFFUP;
750 mpt_prt(mpt, "%s: IOC task terminated!", __func__);
751 restart = 1;
752 break;
753
754 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
755 /* XXX This is a bus-reset */
756 xs->error = XS_DRIVER_STUFFUP;
757 mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
758 restart = 1;
759 break;
760
761 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
762 /*
763 * FreeBSD and Linux indicate this is a phase error between
764 * the IOC and the drive itself. When this happens, the IOC
765 * becomes unhappy and stops processing all transactions.
766 * Call mpt_timeout which knows how to get the IOC back
767 * on its feet.
768 */
769 mpt_prt(mpt, "%s: IOC indicates protocol error -- "
770 "recovering...", __func__);
771 xs->error = XS_TIMEOUT;
772 restart = 1;
773
774 break;
775
776 default:
777 /* XXX unrecognized HBA error */
778 xs->error = XS_DRIVER_STUFFUP;
779 mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
780 le16toh(mpt_reply->IOCStatus));
781 restart = 1;
782 break;
783 }
784
785 if (mpt_reply != NULL) {
786 if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
787 memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
788 sizeof(xs->sense.scsi_sense));
789 } else if (mpt_reply->SCSIState &
790 MPI_SCSI_STATE_AUTOSENSE_FAILED) {
791 /*
792 * This will cause the scsipi layer to issue
793 * a REQUEST SENSE.
794 */
795 if (xs->status == SCSI_CHECK)
796 xs->error = XS_BUSY;
797 }
798 }
799
800 done:
801 if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
802 MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
803 mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
804 mpt_ctlop(mpt, mpt_reply, reply);
805 }
806
807 /* If IOC done with this request, free it up. */
808 if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
809 mpt_free_request(mpt, req);
810
811 /* If address reply, give the buffer back to the IOC. */
812 if (mpt_reply != NULL)
813 mpt_free_reply(mpt, (reply << 1));
814
815 if (xs != NULL)
816 scsipi_done(xs);
817
818 if (restart) {
819 mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
820 mpt_restart(mpt, NULL);
821 }
822 }
823
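/*
 * mpt_run_xfer:
 *
 *	Translate a scsipi_xfer into an MPI SCSI I/O request and hand it
 *	to the IOC.  The data buffer is described with 32-bit simple SGL
 *	elements; transfers with more than MPT_NSGL_FIRST() segments
 *	continue into chained SGL segments within the request slot.
 */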
824 static void
825 mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
826 {
827 struct scsipi_periph *periph = xs->xs_periph;
828 request_t *req;
829 MSG_SCSI_IO_REQUEST *mpt_req;
830 int error, s;
831
832 s = splbio();
833 req = mpt_get_request(mpt);
834 if (__predict_false(req == NULL)) {
835 /* This should happen very infrequently. */
836 xs->error = XS_RESOURCE_SHORTAGE;
837 scsipi_done(xs);
838 splx(s);
839 return;
840 }
841 splx(s);
842
843 /* Link the req and the scsipi_xfer. */
844 req->xfer = xs;
845
846 /* Now we build the command for the IOC */
847 mpt_req = req->req_vbuf;
848 memset(mpt_req, 0, sizeof(*mpt_req));
849
850 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
851 mpt_req->Bus = mpt->bus;
852
853 mpt_req->SenseBufferLength =
854 (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
855 sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;
856
857 /*
858 * We use the message context to find the request structure when
859 * we get the command completion interrupt from the IOC.
860 */
861 mpt_req->MsgContext = htole32(req->index);
862
863 /* Which physical device to do the I/O on. */
864 mpt_req->TargetID = periph->periph_target;
865 mpt_req->LUN[1] = periph->periph_lun;
866
867 /* Set the direction of the transfer. */
868 if (xs->xs_control & XS_CTL_DATA_IN)
869 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
870 else if (xs->xs_control & XS_CTL_DATA_OUT)
871 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
872 else
873 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
874
875 /* Set the queue behavior. */
876 if (__predict_true((!mpt->is_scsi) ||
877 (mpt->mpt_tag_enable &
878 (1 << periph->periph_target)))) {
879 switch (XS_CTL_TAGTYPE(xs)) {
880 case XS_CTL_HEAD_TAG:
881 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
882 break;
883
884 #if 0 /* XXX */
885 case XS_CTL_ACA_TAG:
886 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
887 break;
888 #endif
889
890 case XS_CTL_ORDERED_TAG:
891 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
892 break;
893
894 case XS_CTL_SIMPLE_TAG:
895 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
896 break;
897
898 default:
899 if (mpt->is_scsi)
900 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
901 else
902 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
903 break;
904 }
905 } else
906 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
907
908 if (__predict_false(mpt->is_scsi &&
909 (mpt->mpt_disc_enable &
910 (1 << periph->periph_target)) == 0))
911 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
912
913 mpt_req->Control = htole32(mpt_req->Control);
914
915 /* Copy the SCSI command block into place. */
916 memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);
917
918 mpt_req->CDBLength = xs->cmdlen;
919 mpt_req->DataLength = htole32(xs->datalen);
920 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
921
922 /*
923 * Map the DMA transfer.
924 */
925 if (xs->datalen) {
926 SGE_SIMPLE32 *se;
927
928 error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
929 xs->datalen, NULL,
930 ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
931 : BUS_DMA_WAITOK) |
932 BUS_DMA_STREAMING |
933 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
934 : BUS_DMA_WRITE));
935 switch (error) {
936 case 0:
937 break;
938
939 case ENOMEM:
940 case EAGAIN:
941 xs->error = XS_RESOURCE_SHORTAGE;
942 goto out_bad;
943
944 default:
945 xs->error = XS_DRIVER_STUFFUP;
946 mpt_prt(mpt, "error %d loading DMA map", error);
947 out_bad:
948 s = splbio();
949 mpt_free_request(mpt, req);
950 scsipi_done(xs);
951 splx(s);
952 return;
953 }
954
955 if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
956 int seg, i, nleft = req->dmap->dm_nsegs;
957 uint32_t flags;
958 SGE_CHAIN32 *ce;
959
960 seg = 0;
961 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
962 if (xs->xs_control & XS_CTL_DATA_OUT)
963 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
964
965 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
966 for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
967 i++, se++, seg++) {
968 uint32_t tf;
969
970 memset(se, 0, sizeof(*se));
971 se->Address =
972 htole32(req->dmap->dm_segs[seg].ds_addr);
973 MPI_pSGE_SET_LENGTH(se,
974 req->dmap->dm_segs[seg].ds_len);
975 tf = flags;
976 if (i == MPT_NSGL_FIRST(mpt) - 2)
977 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
978 MPI_pSGE_SET_FLAGS(se, tf);
979 se->FlagsLength = htole32(se->FlagsLength);
980 nleft--;
981 }
982
983 /*
984 * Tell the IOC where to find the first chain element.
985 */
986 mpt_req->ChainOffset =
987 ((char *)se - (char *)mpt_req) >> 2;
988
989 /*
990 * Until we're finished with all segments...
991 */
992 while (nleft) {
993 int ntodo;
994
995 /*
996 * Construct the chain element that points to
997 * the next segment.
998 */
999 ce = (SGE_CHAIN32 *) se++;
1000 if (nleft > MPT_NSGL(mpt)) {
1001 ntodo = MPT_NSGL(mpt) - 1;
1002 ce->NextChainOffset = (MPT_RQSL(mpt) -
1003 sizeof(SGE_SIMPLE32)) >> 2;
1004 ce->Length = htole16(MPT_NSGL(mpt)
1005 * sizeof(SGE_SIMPLE32));
1006 } else {
1007 ntodo = nleft;
1008 ce->NextChainOffset = 0;
1009 ce->Length = htole16(ntodo
1010 * sizeof(SGE_SIMPLE32));
1011 }
1012 ce->Address = htole32(req->req_pbuf +
1013 ((char *)se - (char *)mpt_req));
1014 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1015 for (i = 0; i < ntodo; i++, se++, seg++) {
1016 uint32_t tf;
1017
1018 memset(se, 0, sizeof(*se));
1019 se->Address = htole32(
1020 req->dmap->dm_segs[seg].ds_addr);
1021 MPI_pSGE_SET_LENGTH(se,
1022 req->dmap->dm_segs[seg].ds_len);
1023 tf = flags;
1024 if (i == ntodo - 1) {
1025 tf |=
1026 MPI_SGE_FLAGS_LAST_ELEMENT;
1027 if (ce->NextChainOffset == 0) {
1028 tf |=
1029 MPI_SGE_FLAGS_END_OF_LIST |
1030 MPI_SGE_FLAGS_END_OF_BUFFER;
1031 }
1032 }
1033 MPI_pSGE_SET_FLAGS(se, tf);
1034 se->FlagsLength =
1035 htole32(se->FlagsLength);
1036 nleft--;
1037 }
1038 }
1039 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1040 req->dmap->dm_mapsize,
1041 (xs->xs_control & XS_CTL_DATA_IN) ?
1042 BUS_DMASYNC_PREREAD
1043 : BUS_DMASYNC_PREWRITE);
1044 } else {
1045 int i;
1046 uint32_t flags;
1047
1048 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1049 if (xs->xs_control & XS_CTL_DATA_OUT)
1050 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1051
1052 /* Copy the segments into our SG list. */
1053 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1054 for (i = 0; i < req->dmap->dm_nsegs;
1055 i++, se++) {
1056 uint32_t tf;
1057
1058 memset(se, 0, sizeof(*se));
1059 se->Address =
1060 htole32(req->dmap->dm_segs[i].ds_addr);
1061 MPI_pSGE_SET_LENGTH(se,
1062 req->dmap->dm_segs[i].ds_len);
1063 tf = flags;
1064 if (i == req->dmap->dm_nsegs - 1) {
1065 tf |=
1066 MPI_SGE_FLAGS_LAST_ELEMENT |
1067 MPI_SGE_FLAGS_END_OF_BUFFER |
1068 MPI_SGE_FLAGS_END_OF_LIST;
1069 }
1070 MPI_pSGE_SET_FLAGS(se, tf);
1071 se->FlagsLength = htole32(se->FlagsLength);
1072 }
1073 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1074 req->dmap->dm_mapsize,
1075 (xs->xs_control & XS_CTL_DATA_IN) ?
1076 BUS_DMASYNC_PREREAD
1077 : BUS_DMASYNC_PREWRITE);
1078 }
1079 } else {
1080 /*
1081 * No data to transfer; just make a single simple SGL
1082 * with zero length.
1083 */
1084 SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1085 memset(se, 0, sizeof(*se));
1086 MPI_pSGE_SET_FLAGS(se,
1087 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1088 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1089 se->FlagsLength = htole32(se->FlagsLength);
1090 }
1091
1092 if (mpt->verbose > 1)
1093 mpt_print_scsi_io_request(mpt_req);
1094
1095 if (xs->timeout == 0) {
1096 mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x",
1097 req->index);
1098 xs->timeout = 500;
1099 }
1100
1101 s = splbio();
1102 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1103 callout_reset(&xs->xs_callout,
1104 mstohz(xs->timeout), mpt_timeout, req);
1105 mpt_send_cmd(mpt, req);
1106 splx(s);
1107
1108 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1109 return;
1110
1111 /*
1112 * If we can't use interrupts, poll on completion.
1113 */
1114 if (mpt_poll(mpt, xs, xs->timeout))
1115 mpt_timeout(req);
1116 }
1117
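/*
 * mpt_set_xfer_mode:
 *
 *	Handle ADAPTER_REQ_SET_XFER_MODE: record the tagged-queueing
 *	setting and, for parallel SCSI, rewrite SCSI Device Page 1 with
 *	the requested wide/sync/DT parameters.
 */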
1118 static void
1119 mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
1120 {
1121 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1122
1123 if (xm->xm_mode & PERIPH_CAP_TQING)
1124 mpt->mpt_tag_enable |= (1 << xm->xm_target);
1125 else
1126 mpt->mpt_tag_enable &= ~(1 << xm->xm_target);
1127
1128 if (mpt->is_scsi) {
1129 /*
1130 * Always allow disconnect; we don't have a way to disable
1131 * it right now, in any case.
1132 */
1133 mpt->mpt_disc_enable |= (1 << xm->xm_target);
1134
1135 /*
1136 * SCSI transport settings only make any sense for
1137 * SCSI
1138 */
1139
1140 tmp = mpt->mpt_dev_page1[xm->xm_target];
1141
1142 /*
1143 * Set the wide/narrow parameter for the target.
1144 */
1145 if (xm->xm_mode & PERIPH_CAP_WIDE16)
1146 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1147 else
1148 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1149
1150 /*
1151 * Set the synchronous parameters for the target.
1152 *
1153 * XXX If we request sync transfers, we just go ahead and
1154 * XXX request the maximum available. We need finer control
1155 * XXX in order to implement Domain Validation.
1156 */
1157 tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
1158 MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
1159 MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
1160 MPI_SCSIDEVPAGE1_RP_IU);
1161 if (xm->xm_mode & PERIPH_CAP_SYNC) {
1162 int factor, offset, np;
1163
1164 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1165 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1166 np = 0;
1167 if (factor < 0x9) {
1168 /* Ultra320 */
1169 np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
1170 }
1171 if (factor < 0xa) {
1172 /* at least Ultra160 */
1173 np |= MPI_SCSIDEVPAGE1_RP_DT;
1174 }
1175 np |= (factor << 8) | (offset << 16);
1176 tmp.RequestedParameters |= np;
1177 }
1178
1179 host2mpt_config_page_scsi_device_1(&tmp);
1180 if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1181 mpt_prt(mpt, "unable to write Device Page 1");
1182 return;
1183 }
1184
1185 if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1186 mpt_prt(mpt, "unable to read back Device Page 1");
1187 return;
1188 }
1189
1190 mpt2host_config_page_scsi_device_1(&tmp);
1191 mpt->mpt_dev_page1[xm->xm_target] = tmp;
1192 if (mpt->verbose > 1) {
1193 mpt_prt(mpt,
1194 "SPI Target %d Page 1: RequestedParameters %x Config %x",
1195 xm->xm_target,
1196 mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
1197 mpt->mpt_dev_page1[xm->xm_target].Configuration);
1198 }
1199 }
1200
1201 /*
1202 * Make a note that we should perform an async callback at the
1203 * end of the next successful command completion to report the
1204 * negotiated transfer mode.
1205 */
1206 mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
1207 }
1208
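/*
 * mpt_get_xfer_mode:
 *
 *	Read back SCSI Device Page 0 for a target and report the
 *	negotiated transfer parameters to scsipi as an async event.
 */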
1209 static void
1210 mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
1211 {
1212 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1213 struct scsipi_xfer_mode xm;
1214 int period, offset;
1215
1216 tmp = mpt->mpt_dev_page0[periph->periph_target];
1217 host2mpt_config_page_scsi_device_0(&tmp);
1218 if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
1219 mpt_prt(mpt, "unable to read Device Page 0");
1220 return;
1221 }
1222 mpt2host_config_page_scsi_device_0(&tmp);
1223
1224 if (mpt->verbose > 1) {
1225 mpt_prt(mpt,
1226 "SPI Tgt %d Page 0: NParms %x Information %x",
1227 periph->periph_target,
1228 tmp.NegotiatedParameters, tmp.Information);
1229 }
1230
1231 xm.xm_target = periph->periph_target;
1232 xm.xm_mode = 0;
1233
1234 if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
1235 xm.xm_mode |= PERIPH_CAP_WIDE16;
1236
1237 period = (tmp.NegotiatedParameters >> 8) & 0xff;
1238 offset = (tmp.NegotiatedParameters >> 16) & 0xff;
1239 if (offset) {
1240 xm.xm_period = period;
1241 xm.xm_offset = offset;
1242 xm.xm_mode |= PERIPH_CAP_SYNC;
1243 }
1244
1245 /*
1246 * Tagged queueing is all controlled by us; there is no
1247 * other setting to query.
1248 */
1249 if (mpt->mpt_tag_enable & (1 << periph->periph_target))
1250 xm.xm_mode |= PERIPH_CAP_TQING;
1251
1252 /*
1253 * We're going to deliver the async event, so clear the marker.
1254 */
1255 mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
1256
1257 scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
1258 }
1259
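/*
 * mpt_ctlop:
 *
 *	Handle a control-operation reply (MsgContext high bit set):
 *	event notifications, event ACKs, port enable and config replies.
 */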
1260 static void
1261 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
1262 {
1263 MSG_DEFAULT_REPLY *dmsg = vmsg;
1264
1265 switch (dmsg->Function) {
1266 case MPI_FUNCTION_EVENT_NOTIFICATION:
1267 mpt_event_notify_reply(mpt, vmsg);
1268 mpt_free_reply(mpt, (reply << 1));
1269 break;
1270
1271 case MPI_FUNCTION_EVENT_ACK:
1272 {
1273 MSG_EVENT_ACK_REPLY *msg = vmsg;
1274 int index = le32toh(msg->MsgContext) & ~0x80000000;
1275 mpt_free_reply(mpt, (reply << 1));
1276 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1277 request_t *req = &mpt->request_pool[index];
1278 mpt_free_request(mpt, req);
1279 }
1280 break;
1281 }
1282
1283 case MPI_FUNCTION_PORT_ENABLE:
1284 {
1285 MSG_PORT_ENABLE_REPLY *msg = vmsg;
1286 int index = le32toh(msg->MsgContext) & ~0x80000000;
1287 if (mpt->verbose > 1)
1288 mpt_prt(mpt, "enable port reply index %d", index);
1289 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1290 request_t *req = &mpt->request_pool[index];
1291 req->debug = REQ_DONE;
1292 }
1293 mpt_free_reply(mpt, (reply << 1));
1294 break;
1295 }
1296
1297 case MPI_FUNCTION_CONFIG:
1298 {
1299 MSG_CONFIG_REPLY *msg = vmsg;
1300 int index = le32toh(msg->MsgContext) & ~0x80000000;
1301 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1302 request_t *req = &mpt->request_pool[index];
1303 req->debug = REQ_DONE;
1304 req->sequence = reply;
1305 } else
1306 mpt_free_reply(mpt, (reply << 1));
1307 break;
1308 }
1309
1310 default:
1311 mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
1312 }
1313 }
1314
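/*
 * mpt_event_notify_reply:
 *
 *	Decode and log an asynchronous event from the IOC, sending an
 *	EVENT_ACK back when the firmware requests one.
 */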
1315 static void
1316 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
1317 {
1318
1319 switch (le32toh(msg->Event)) {
1320 case MPI_EVENT_LOG_DATA:
1321 {
1322 int i;
1323
1324 /* Some error occurred that the Fusion wants logged. */
1325 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1326 mpt_prt(mpt, "EvtLogData: Event Data:");
1327 for (i = 0; i < msg->EventDataLength; i++) {
1328 if ((i % 4) == 0)
1329 printf("%s:\t", device_xname(mpt->sc_dev));
1330 printf("0x%08x%c", msg->Data[i],
1331 ((i % 4) == 3) ? '\n' : ' ');
1332 }
1333 if ((i % 4) != 0)
1334 printf("\n");
1335 break;
1336 }
1337
1338 case MPI_EVENT_UNIT_ATTENTION:
1339 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1340 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1341 break;
1342
1343 case MPI_EVENT_IOC_BUS_RESET:
1344 /* We generated a bus reset. */
1345 mpt_prt(mpt, "IOC Bus Reset Port %d",
1346 (msg->Data[0] >> 8) & 0xff);
1347 break;
1348
1349 case MPI_EVENT_EXT_BUS_RESET:
1350 /* Someone else generated a bus reset. */
1351 mpt_prt(mpt, "External Bus Reset");
1352 /*
1353 * These replies don't return EventData like the MPI
1354 * spec says they do.
1355 */
1356 /* XXX Send an async event? */
1357 break;
1358
1359 case MPI_EVENT_RESCAN:
1360 /*
1361 * In general, this means a device has been added
1362 * to the loop.
1363 */
1364 mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
1365 /* XXX Send an async event? */
1366 break;
1367
1368 case MPI_EVENT_LINK_STATUS_CHANGE:
1369 mpt_prt(mpt, "Port %d: Link state %s",
1370 (msg->Data[1] >> 8) & 0xff,
1371 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1372 break;
1373
1374 case MPI_EVENT_LOOP_STATE_CHANGE:
1375 switch ((msg->Data[0] >> 16) & 0xff) {
1376 case 0x01:
1377 mpt_prt(mpt,
1378 "Port %d: FC Link Event: LIP(%02x,%02x) "
1379 "(Loop Initialization)",
1380 (msg->Data[1] >> 8) & 0xff,
1381 (msg->Data[0] >> 8) & 0xff,
1382 (msg->Data[0] ) & 0xff);
1383 switch ((msg->Data[0] >> 8) & 0xff) {
1384 case 0xf7:
1385 if ((msg->Data[0] & 0xff) == 0xf7)
1386 mpt_prt(mpt, "\tDevice needs AL_PA");
1387 else
1388 mpt_prt(mpt, "\tDevice %02x doesn't "
1389 "like FC performance",
1390 msg->Data[0] & 0xff);
1391 break;
1392
1393 case 0xf8:
1394 if ((msg->Data[0] & 0xff) == 0xf7)
1395 mpt_prt(mpt, "\tDevice detected loop "
1396 "failure before acquiring AL_PA");
1397 else
1398 mpt_prt(mpt, "\tDevice %02x detected "
1399 "loop failure",
1400 msg->Data[0] & 0xff);
1401 break;
1402
1403 default:
1404 mpt_prt(mpt, "\tDevice %02x requests that "
1405 "device %02x reset itself",
1406 msg->Data[0] & 0xff,
1407 (msg->Data[0] >> 8) & 0xff);
1408 break;
1409 }
1410 break;
1411
1412 case 0x02:
1413 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
1414 "(Loop Port Enable)",
1415 (msg->Data[1] >> 8) & 0xff,
1416 (msg->Data[0] >> 8) & 0xff,
1417 (msg->Data[0] ) & 0xff);
1418 break;
1419
1420 case 0x03:
1421 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
1422 "(Loop Port Bypass)",
1423 (msg->Data[1] >> 8) & 0xff,
1424 (msg->Data[0] >> 8) & 0xff,
1425 (msg->Data[0] ) & 0xff);
1426 break;
1427
1428 default:
1429 mpt_prt(mpt, "Port %d: FC Link Event: "
1430 "Unknown event (%02x %02x %02x)",
1431 (msg->Data[1] >> 8) & 0xff,
1432 (msg->Data[0] >> 16) & 0xff,
1433 (msg->Data[0] >> 8) & 0xff,
1434 (msg->Data[0] ) & 0xff);
1435 break;
1436 }
1437 break;
1438
1439 case MPI_EVENT_LOGOUT:
1440 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
1441 (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1442 break;
1443
1444 case MPI_EVENT_EVENT_CHANGE:
1445 /*
1446 * This is just an acknowledgement of our
1447 * mpt_send_event_request().
1448 */
1449 break;
1450
1451 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1452 switch ((msg->Data[0] >> 12) & 0x0f) {
1453 case 0x00:
1454 mpt_prt(mpt, "Phy %d: Link Status Unknown",
1455 msg->Data[0] & 0xff);
1456 break;
1457 case 0x01:
1458 mpt_prt(mpt, "Phy %d: Link Disabled",
1459 msg->Data[0] & 0xff);
1460 break;
1461 case 0x02:
1462 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
1463 msg->Data[0] & 0xff);
1464 break;
1465 case 0x03:
1466 mpt_prt(mpt, "Phy %d: SATA OOB Complete",
1467 msg->Data[0] & 0xff);
1468 break;
1469 case 0x08:
1470 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
1471 msg->Data[0] & 0xff);
1472 break;
1473 case 0x09:
1474 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
1475 msg->Data[0] & 0xff);
1476 break;
1477 default:
1478 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1479 "Unknown event (%0x)",
1480 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1481 }
1482 break;
1483
1484 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1485 case MPI_EVENT_SAS_DISCOVERY:
1486 /* ignore these events for now */
1487 break;
1488
1489 case MPI_EVENT_QUEUE_FULL:
1490 /* This can get a little chatty */
1491 if (mpt->verbose > 0)
1492 mpt_prt(mpt, "Queue Full Event");
1493 break;
1494
1495 default:
1496 mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
1497 break;
1498 }
1499
1500 if (msg->AckRequired) {
1501 MSG_EVENT_ACK *ackp;
1502 request_t *req;
1503
1504 if ((req = mpt_get_request(mpt)) == NULL) {
1505 /* XXX XXX XXX XXXJRT */
1506 panic("mpt_event_notify_reply: unable to allocate "
1507 "request structure");
1508 }
1509
1510 ackp = (MSG_EVENT_ACK *) req->req_vbuf;
1511 memset(ackp, 0, sizeof(*ackp));
1512 ackp->Function = MPI_FUNCTION_EVENT_ACK;
1513 ackp->Event = msg->Event;
1514 ackp->EventContext = msg->EventContext;
1515 ackp->MsgContext = htole32(req->index | 0x80000000);
1516 mpt_check_doorbell(mpt);
1517 mpt_send_cmd(mpt, req);
1518 }
1519 }
1520
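/*
 * mpt_bus_reset:
 *
 *	Issue a SCSI task management RESET_BUS request via the handshake
 *	interface.  Only one task management request (mpt->mngt_req) is
 *	kept outstanding at a time.
 */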
1521 static void
1522 mpt_bus_reset(mpt_softc_t *mpt)
1523 {
1524 request_t *req;
1525 MSG_SCSI_TASK_MGMT *mngt_req;
1526 int s;
1527
1528 s = splbio();
1529 if (mpt->mngt_req) {
1530 /* request already queued; can't do more */
1531 splx(s);
1532 return;
1533 }
1534 req = mpt_get_request(mpt);
1535 if (__predict_false(req == NULL)) {
1536 mpt_prt(mpt, "no mngt request");
1537 splx(s);
1538 return;
1539 }
1540 mpt->mngt_req = req;
1541 splx(s);
1542 mngt_req = req->req_vbuf;
1543 memset(mngt_req, 0, sizeof(*mngt_req));
1544 mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
1545 mngt_req->Bus = mpt->bus;
1546 mngt_req->TargetID = 0;
1547 mngt_req->ChainOffset = 0;
1548 mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
1549 mngt_req->Reserved1 = 0;
1550 mngt_req->MsgFlags =
1551 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
1552 mngt_req->MsgContext = req->index;
1553 mngt_req->TaskMsgContext = 0;
1554 s = splbio();
1555 mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
1556 splx(s);
1557 }
1558
1559 /*****************************************************************************
1560 * SCSI interface routines
1561 *****************************************************************************/
1562
1563 static void
1564 mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1565 void *arg)
1566 {
1567 struct scsipi_adapter *adapt = chan->chan_adapter;
1568 mpt_softc_t *mpt = device_private(adapt->adapt_dev);
1569
1570 switch (req) {
1571 case ADAPTER_REQ_RUN_XFER:
1572 mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
1573 return;
1574
1575 case ADAPTER_REQ_GROW_RESOURCES:
1576 /* Not supported. */
1577 return;
1578
1579 case ADAPTER_REQ_SET_XFER_MODE:
1580 mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
1581 return;
1582 }
1583 }
1584
1585 static void
1586 mpt_minphys(struct buf *bp)
1587 {
1588
1589 /*
1590 * Subtract one from the SGL limit, since we need an extra one to handle
1591 * a non-page-aligned transfer.
1592 */
1593 #define MPT_MAX_XFER ((MPT_SGL_MAX - 1) * PAGE_SIZE)
1594
1595 if (bp->b_bcount > MPT_MAX_XFER)
1596 bp->b_bcount = MPT_MAX_XFER;
1597 minphys(bp);
1598 }
1599
1600 static int
1601 mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1602 int flag, struct proc *p)
1603 {
1604 mpt_softc_t *mpt;
1605 int s;
1606
1607 mpt = device_private(chan->chan_adapter->adapt_dev);
1608 switch (cmd) {
1609 case SCBUSIORESET:
1610 mpt_bus_reset(mpt);
1611 s = splbio();
1612 mpt_intr(mpt);
1613 splx(s);
1614 return(0);
1615 default:
1616 return (ENOTTY);
1617 }
1618 }
1619
1620 #if NBIO > 0
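/*
 * Helpers for bio(4) support: each reads a RAID-related configuration
 * page from the IOC into malloc(9)'d storage (byte-swapped to host
 * order where needed).  The caller is responsible for freeing the
 * returned page.
 */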
1621 static fCONFIG_PAGE_IOC_2 *
1622 mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
1623 {
1624 fCONFIG_PAGE_HEADER hdr;
1625 fCONFIG_PAGE_IOC_2 *ioc2;
1626 int rv;
1627
1628 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
1629 if (rv)
1630 return NULL;
1631
1632 ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1633 if (ioc2 == NULL)
1634 return NULL;
1635
1636 memcpy(ioc2, &hdr, sizeof(hdr));
1637
1638 rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
1639 if (rv)
1640 goto fail;
1641 mpt2host_config_page_ioc_2(ioc2);
1642
1643 return ioc2;
1644
1645 fail:
1646 free(ioc2, M_DEVBUF);
1647 return NULL;
1648 }
1649
1650 static fCONFIG_PAGE_IOC_3 *
1651 mpt_get_cfg_page_ioc3(mpt_softc_t *mpt)
1652 {
1653 fCONFIG_PAGE_HEADER hdr;
1654 fCONFIG_PAGE_IOC_3 *ioc3;
1655 int rv;
1656
1657 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr);
1658 if (rv)
1659 return NULL;
1660
1661 ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1662 if (ioc3 == NULL)
1663 return NULL;
1664
1665 memcpy(ioc3, &hdr, sizeof(hdr));
1666
1667 rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header);
1668 if (rv)
1669 goto fail;
1670
1671 return ioc3;
1672
1673 fail:
1674 free(ioc3, M_DEVBUF);
1675 return NULL;
1676 }
1677
1678
1679 static fCONFIG_PAGE_RAID_VOL_0 *
1680 mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
1681 {
1682 fCONFIG_PAGE_HEADER hdr;
1683 fCONFIG_PAGE_RAID_VOL_0 *rvol0;
1684 int rv;
1685
1686 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1687 address, &hdr);
1688 if (rv)
1689 return NULL;
1690
1691 rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1692 if (rvol0 == NULL)
1693 return NULL;
1694
1695 memcpy(rvol0, &hdr, sizeof(hdr));
1696
1697 rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
1698 if (rv)
1699 goto fail;
1700 mpt2host_config_page_raid_vol_0(rvol0);
1701
1702 return rvol0;
1703
1704 fail:
1705 free(rvol0, M_DEVBUF);
1706 return NULL;
1707 }
1708
1709 static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
1710 mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
1711 {
1712 fCONFIG_PAGE_HEADER hdr;
1713 fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
1714 int rv;
1715
1716 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
1717 address, &hdr);
1718 if (rv)
1719 return NULL;
1720
1721 physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1722 if (physdisk0 == NULL)
1723 return NULL;
1724
1725 memcpy(physdisk0, &hdr, sizeof(hdr));
1726
1727 rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
1728 if (rv)
1729 goto fail;
1730 mpt2host_config_page_raid_phys_disk_0(physdisk0);
1731
1732 return physdisk0;
1733
1734 fail:
1735 free(physdisk0, M_DEVBUF);
1736 return NULL;
1737 }
1738
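/*
 * mpt_is_raid:
 *
 *	Return true if IOC Page 2 advertises any Integrated RAID
 *	capability (IS, IME or IM), i.e. whether this controller should
 *	be registered with bio(4).
 */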
1739 static bool
1740 mpt_is_raid(mpt_softc_t *mpt)
1741 {
1742 fCONFIG_PAGE_IOC_2 *ioc2;
1743 bool is_raid = false;
1744
1745 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1746 if (ioc2 == NULL)
1747 return false;
1748
1749 if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
1750 is_raid = !!(ioc2->CapabilitiesFlags &
1751 (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
1752 MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
1753 MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
1754 }
1755
1756 free(ioc2, M_DEVBUF);
1757
1758 return is_raid;
1759 }
1760
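/*
 * mpt_bio_ioctl:
 *
 *	bio(4) ioctl entry point; dispatches BIOCINQ, BIOCVOL and
 *	BIOCDISK* requests at splbio with the kernel lock held.
 */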
1761 static int
1762 mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
1763 {
1764 mpt_softc_t *mpt = device_private(dev);
1765 int error, s;
1766
1767 KERNEL_LOCK(1, curlwp);
1768 s = splbio();
1769
1770 switch (cmd) {
1771 case BIOCINQ:
1772 error = mpt_bio_ioctl_inq(mpt, addr);
1773 break;
1774 case BIOCVOL:
1775 error = mpt_bio_ioctl_vol(mpt, addr);
1776 break;
1777 case BIOCDISK_NOVOL:
1778 error = mpt_bio_ioctl_disk_novol(mpt, addr);
1779 break;
1780 case BIOCDISK:
1781 error = mpt_bio_ioctl_disk(mpt, addr);
1782 break;
1783 default:
1784 error = EINVAL;
1785 break;
1786 }
1787
1788 splx(s);
1789 KERNEL_UNLOCK_ONE(curlwp);
1790
1791 return error;
1792 }
1793
1794 static int
1795 mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
1796 {
1797 fCONFIG_PAGE_IOC_2 *ioc2;
1798 fCONFIG_PAGE_IOC_3 *ioc3;
1799
1800 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1801 if (ioc2 == NULL)
1802 return EIO;
1803 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1804 if (ioc3 == NULL) {
1805 free(ioc2, M_DEVBUF);
1806 return EIO;
1807 }
1808
1809 strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
1810 bi->bi_novol = ioc2->NumActiveVolumes;
1811 bi->bi_nodisk = ioc3->NumPhysDisks;
1812
1813 free(ioc2, M_DEVBUF);
1814 free(ioc3, M_DEVBUF);
1815
1816 return 0;
1817 }
1818
1819 static int
1820 mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
1821 {
1822 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1823 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1824 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1825 struct scsipi_periph *periph;
1826 struct scsipi_inquiry_data inqbuf;
1827 char vendor[9], product[17], revision[5];
1828 int address;
1829
1830 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1831 if (ioc2 == NULL)
1832 return EIO;
1833
1834 if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
1835 goto fail;
1836
1837 ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
1838 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1839
1840 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1841 if (rvol0 == NULL)
1842 goto fail;
1843
1844 bv->bv_dev[0] = '\0';
1845 bv->bv_vendor[0] = '\0';
1846
1847 periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0);
1848 if (periph != NULL) {
1849 if (periph->periph_dev != NULL) {
1850 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
1851 device_xname(periph->periph_dev));
1852 }
1853 memset(&inqbuf, 0, sizeof(inqbuf));
1854 if (scsipi_inquire(periph, &inqbuf,
1855 XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
1856 strnvisx(vendor, sizeof(vendor),
1857 inqbuf.vendor, sizeof(inqbuf.vendor),
1858 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1859 strnvisx(product, sizeof(product),
1860 inqbuf.product, sizeof(inqbuf.product),
1861 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1862 strnvisx(revision, sizeof(revision),
1863 inqbuf.revision, sizeof(inqbuf.revision),
1864 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1865
1866 snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
1867 "%s %s %s", vendor, product, revision);
1868 }
1869
1870 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
1871 device_xname(periph->periph_dev));
1872 }
1873 bv->bv_nodisk = rvol0->NumPhysDisks;
1874 bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
1875 bv->bv_stripe_size = rvol0->StripeSize;
1876 bv->bv_percent = -1;
1877 bv->bv_seconds = 0;
1878
1879 switch (rvol0->VolumeStatus.State) {
1880 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
1881 bv->bv_status = BIOC_SVONLINE;
1882 break;
1883 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
1884 bv->bv_status = BIOC_SVDEGRADED;
1885 break;
1886 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1887 bv->bv_status = BIOC_SVOFFLINE;
1888 break;
1889 default:
1890 bv->bv_status = BIOC_SVINVALID;
1891 break;
1892 }
1893
1894 switch (ioc2rvol->VolumeType) {
1895 case MPI_RAID_VOL_TYPE_IS:
1896 bv->bv_level = 0;
1897 break;
1898 case MPI_RAID_VOL_TYPE_IME:
1899 case MPI_RAID_VOL_TYPE_IM:
1900 bv->bv_level = 1;
1901 break;
1902 default:
1903 bv->bv_level = -1;
1904 break;
1905 }
1906
1907 free(ioc2, M_DEVBUF);
1908 free(rvol0, M_DEVBUF);
1909
1910 return 0;
1911
1912 fail:
1913 if (ioc2) free(ioc2, M_DEVBUF);
1914 if (rvol0) free(rvol0, M_DEVBUF);
1915 return EINVAL;
1916 }
1917
1918 static void
1919 mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
1920 int address)
1921 {
1922 fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
1923 char vendor_id[9], product_id[17], product_rev_level[5];
1924
1925 phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
1926 if (phys == NULL)
1927 return;
1928
1929 strnvisx(vendor_id, sizeof(vendor_id),
1930 phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID),
1931 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1932 strnvisx(product_id, sizeof(product_id),
1933 phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID),
1934 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1935 strnvisx(product_rev_level, sizeof(product_rev_level),
1936 phys->InquiryData.ProductRevLevel,
1937 sizeof(phys->InquiryData.ProductRevLevel),
1938 VIS_TRIM|VIS_SAFE|VIS_OCTAL);
1939
1940 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
1941 vendor_id, product_id, product_rev_level);
1942 strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
1943 bd->bd_procdev[0] = '\0';
1944 bd->bd_channel = phys->PhysDiskBus;
1945 bd->bd_target = phys->PhysDiskID;
1946 bd->bd_lun = 0;
1947 bd->bd_size = (uint64_t)phys->MaxLBA * 512;
1948
1949 switch (phys->PhysDiskStatus.State) {
1950 case MPI_PHYSDISK0_STATUS_ONLINE:
1951 bd->bd_status = BIOC_SDONLINE;
1952 break;
1953 case MPI_PHYSDISK0_STATUS_MISSING:
1954 case MPI_PHYSDISK0_STATUS_FAILED:
1955 bd->bd_status = BIOC_SDFAILED;
1956 break;
1957 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1958 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1959 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1960 bd->bd_status = BIOC_SDOFFLINE;
1961 break;
1962 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1963 bd->bd_status = BIOC_SDSCRUB;
1964 break;
1965 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1966 default:
1967 bd->bd_status = BIOC_SDINVALID;
1968 break;
1969 }
1970
1971 free(phys, M_DEVBUF);
1972 }
1973
1974 static int
1975 mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
1976 {
1977 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1978 fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
1979 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1980 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1981 int address, v, d;
1982
1983 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1984 if (ioc2 == NULL)
1985 return EIO;
1986 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1987 if (ioc3 == NULL) {
1988 free(ioc2, M_DEVBUF);
1989 return EIO;
1990 }
1991
1992 if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
1993 goto fail;
1994
1995 address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;
1996
1997 mpt_bio_ioctl_disk_common(mpt, bd, address);
1998
1999 bd->bd_disknovol = true;
2000 for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
2001 ioc2rvol = &ioc2->RaidVolume[v];
2002 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
2003
2004 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
2005 if (rvol0 == NULL)
2006 continue;
2007
2008 for (d = 0; d < rvol0->NumPhysDisks; d++) {
2009 if (rvol0->PhysDisk[d].PhysDiskNum ==
2010 ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
2011 bd->bd_disknovol = false;
2012 bd->bd_volid = v;
2013 break;
2014 }
2015 }
2016 free(rvol0, M_DEVBUF);
2017 }
2018
2019 free(ioc3, M_DEVBUF);
2020 free(ioc2, M_DEVBUF);
2021
2022 return 0;
2023
2024 fail:
2025 if (ioc3) free(ioc3, M_DEVBUF);
2026 if (ioc2) free(ioc2, M_DEVBUF);
2027 return EINVAL;
2028 }
2029
2030
2031 static int
2032 mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
2033 {
2034 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
2035 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
2036 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
2037 int address;
2038
2039 ioc2 = mpt_get_cfg_page_ioc2(mpt);
2040 if (ioc2 == NULL)
2041 return EIO;
2042
2043 if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
2044 goto fail;
2045
2046 ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
2047 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
2048
2049 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
2050 if (rvol0 == NULL)
2051 goto fail;
2052
2053 if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
2054 goto fail;
2055
2056 address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;
2057
2058 mpt_bio_ioctl_disk_common(mpt, bd, address);
2059
2060 free(ioc2, M_DEVBUF);
2061
2062 return 0;
2063
2064 fail:
2065 if (ioc2) free(ioc2, M_DEVBUF);
2066 return EINVAL;
2067 }
2068 #endif
2069
2070