/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/vtrace.h>


#define	A_TO_TRAN(ap)	((ap)->a_hba_tran)
#define	P_TO_TRAN(pkt)	((pkt)->pkt_address.a_hba_tran)
#define	P_TO_ADDR(pkt)	(&((pkt)->pkt_address))

/*
 * Callback id
 */
uintptr_t scsi_callback_id = 0;

extern ddi_dma_attr_t scsi_alloc_attr;

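/*
 * scsi_alloc_consistent_buf()
 *
 * Allocate an I/O buffer suitable for DMA-consistent transfers,
 * typically for commands with small data transfers such as REQUEST
 * SENSE.  If in_bp is NULL, a buf(9S) header is also allocated with
 * getrbuf(); otherwise in_bp is reused and re-associated with the
 * freshly allocated memory.  Returns NULL if resources are
 * unavailable and callback is not SLEEP_FUNC.
 *
 * A minimal usage sketch from a hypothetical target driver (devp and
 * the lengths are illustrative, not defined in this file):
 *
 *	bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
 *	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP0,
 *	    1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
 */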
struct buf *
scsi_alloc_consistent_buf(struct scsi_address *ap,
    struct buf *in_bp, size_t datalen, uint_t bflags,
    int (*callback)(caddr_t), caddr_t callback_arg)
{
	dev_info_t *pdip;
	struct buf *bp;
	int kmflag;
	size_t rlen;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
	    "scsi_alloc_consistent_buf_start");

	if (!in_bp) {
		kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
		if ((bp = getrbuf(kmflag)) == NULL) {
			goto no_resource;
		}
	} else {
		bp = in_bp;

		/* we are establishing a new buffer memory association */
		bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
		bp->b_proc = NULL;
		bp->b_pages = NULL;
		bp->b_shadow = NULL;
	}

	/* limit bits that can be set by bflags argument */
	ASSERT(!(bflags & ~(B_READ | B_WRITE)));
	bflags &= (B_READ | B_WRITE);
	bp->b_un.b_addr = 0;

	if (datalen) {
		pdip = (A_TO_TRAN(ap))->tran_hba_dip;

		/*
		 * use i_ddi_mem_alloc() for now until we have an interface to
		 * allocate memory for DMA which doesn't require a DMA handle.
		 * ddi_iopb_alloc() is obsolete and we want more flexibility in
		 * controlling the DMA address constraints.
		 */
		while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
		    ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
		    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
			if (callback == SLEEP_FUNC) {
				delay(drv_usectohz(10000));
			} else {
				if (!in_bp)
					freerbuf(bp);
				goto no_resource;
			}
		}
		bp->b_flags |= bflags;
	}
	bp->b_bcount = datalen;
	bp->b_resid = 0;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
	    "scsi_alloc_consistent_buf_end");
	return (bp);

no_resource:

	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
		ddi_set_callback(callback, callback_arg,
		    &scsi_callback_id);
	}
	TRACE_0(TR_FAC_SCSI_RES,
	    TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
	    "scsi_alloc_consistent_buf_end (return1)");
	return (NULL);
}

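/*
 * Free the consistent I/O memory (if any) hanging off bp, return the
 * buf(9S) header itself with freerbuf(), then run any resource
 * callback queued by an earlier failed allocation.
 */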
void
scsi_free_consistent_buf(struct buf *bp)
{
	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
	    "scsi_free_consistent_buf_start");
	if (!bp)
		return;
	if (bp->b_un.b_addr)
		i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
	freerbuf(bp);
	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
	    "scsi_free_consistent_buf_end");
}

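/*
 * Release the DMA resources bound to a cache-allocated packet by
 * scsi_dma_buf_bind_attr(); safe to call when nothing is bound.
 */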
void
scsi_dmafree_attr(struct scsi_pkt *pktp)
{
	struct scsi_pkt_cache_wrapper *pktw =
	    (struct scsi_pkt_cache_wrapper *)pktp;

	if (pktw->pcw_flags & PCW_BOUND) {
		if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
		    DDI_SUCCESS)
			cmn_err(CE_WARN, "scsi_dmafree_attr: "
			    "unbind handle failed");
		pktw->pcw_flags &= ~PCW_BOUND;
	}
	pktp->pkt_numcookies = 0;
	pktw->pcw_totalwin = 0;
}

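/*
 * Return the buf(9S) that was mapped for a cache-allocated scsi_pkt.
 */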
struct buf *
scsi_pkt2bp(struct scsi_pkt *pkt)
{
	return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
}

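/*
 * Bind bp's memory to the packet's DMA handle and record the window
 * bookkeeping for scsi_dmaget_attr().  Returns 1 on success, 0 on
 * failure; on failure bp->b_error is 0 for a retryable resource
 * shortage, or an errno (EINVAL, EFAULT) for a hard failure.
 */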
int
scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
    struct buf *bp,
    int dma_flags,
    int (*callback)(),
    caddr_t arg)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);
	int status;

	/*
	 * First time, need to establish the handle.
	 */

	ASSERT(pktp->pkt_numcookies == 0);
	ASSERT(pktw->pcw_totalwin == 0);

	status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
	    callback, arg, &pktw->pcw_cookie,
	    &pktp->pkt_numcookies);

	switch (status) {
	case DDI_DMA_MAPPED:
		pktw->pcw_totalwin = 1;
		break;

	case DDI_DMA_PARTIAL_MAP:
		/* enable first call to ddi_dma_getwin */
		if (ddi_dma_numwin(pktp->pkt_handle,
		    &pktw->pcw_totalwin) != DDI_SUCCESS) {
			bp->b_error = 0;
			return (0);
		}
		break;

	case DDI_DMA_NORESOURCES:
		bp->b_error = 0;
		return (0);

	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		return (0);

	case DDI_DMA_NOMAPPING:
	case DDI_DMA_INUSE:
	default:
		bioerror(bp, EFAULT);
		return (0);
	}

	/* initialize the loop controls for scsi_dmaget_attr() */
	pktw->pcw_curwin = 0;
	pktw->pcw_total_xfer = 0;
	pktp->pkt_dma_flags = dma_flags;
	return (1);
}

#if defined(_DMA_USES_PHYSADDR)
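/*
 * Activate the next DMA window if needed and walk the cookie list of
 * the current window, accumulating the length of this partial
 * transfer.  Returns 1 on success, 0 if the window could not be
 * activated.
 */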
int
scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);

	int status;
	int num_segs = 0;
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)pktp->pkt_handle;
	ddi_dma_cookie_t *cp;

	if (pktw->pcw_curwin != 0) {
		ddi_dma_cookie_t cookie;

		/*
		 * start the next window, and get its first cookie
		 */
		status = ddi_dma_getwin(pktp->pkt_handle,
		    pktw->pcw_curwin, &pktp->pkt_dma_offset,
		    &pktp->pkt_dma_len, &cookie,
		    &pktp->pkt_numcookies);
		if (status != DDI_SUCCESS)
			return (0);
	}

	/*
	 * start the Scatter/Gather loop
	 */
	cp = hp->dmai_cookie - 1;
	pktp->pkt_dma_len = 0;
	for (;;) {

		/* take care of the loop-bookkeeping */
		pktp->pkt_dma_len += cp->dmac_size;
		num_segs++;
		/*
		 * if this was the last cookie in the current window,
		 * set the loop controls to start the next window and
		 * exit so the HBA can do this partial transfer
		 */
		if (num_segs >= pktp->pkt_numcookies) {
			pktw->pcw_curwin++;
			break;
		}

		cp++;
	}
	pktw->pcw_total_xfer += pktp->pkt_dma_len;
	pktp->pkt_cookies = hp->dmai_cookie - 1;
	hp->dmai_cookie = cp;

	return (1);
}
#endif

void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);

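/*
 * tran_init_pkt(9E) implementation used for HBA drivers that supply
 * tran_setup_pkt(9E): allocate the packet (plus hba/cdb/private/scb
 * space) from the HBA's kmem cache and, if bp is supplied, bind DMA
 * resources for the transfer.
 */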
struct scsi_pkt *
scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
	struct scsi_pkt_cache_wrapper *pktw;
	scsi_hba_tran_t *tranp = ap->a_hba_tran;
	int (*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	if (in_pktp == NULL) {
		int kf;

		if (callback == SLEEP_FUNC)
			kf = KM_SLEEP;
		else
			kf = KM_NOSLEEP;
		/*
		 * By using kmem_cache_alloc(), the layout of the
		 * scsi_pkt, scsi_pkt_cache_wrapper, hba private data,
		 * cdb, tgt driver private data, and status block is
		 * as shown below.
		 *
		 * This is a piece of contiguous memory starting from
		 * the first structure field scsi_pkt in the struct
		 * scsi_pkt_cache_wrapper, followed by the hba private
		 * data, pkt_cdbp, the tgt driver private data and
		 * pkt_scbp.
		 *
		 * |----------------------------|--------------------->
		 * | struct scsi_pkt            |       struct
		 * | ......                     |scsi_pkt_cache_wrapper
		 * | pcw_flags                  |
		 * |----------------------------|<---------------------
		 * | hba private data           |tranp->tran_hba_len
		 * |----------------------------|
		 * | pkt_cdbp                   |DEFAULT_CDBLEN
		 * |----------------------------|
		 * | tgt private data           |DEFAULT_PRIVLEN
		 * |----------------------------|
		 * | pkt_scbp                   |DEFAULT_SCBLEN
		 * |----------------------------|
		 *
		 * If the actual data length of the cdb, or the tgt
		 * driver private data, or the status block is bigger
		 * than the default data length, kmem_alloc() will be
		 * called to get extra space.
		 */
		pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
		    kf);
		if (pktw == NULL)
			goto fail1;

		pktw->pcw_flags = 0;
		in_pktp = &(pktw->pcw_pkt);
		in_pktp->pkt_address = *ap;

		/*
		 * target drivers should initialize pkt_comp and
		 * pkt_time, but sometimes they don't, so initialize
		 * them here to be safe.
		 */
		in_pktp->pkt_flags = 0;
		in_pktp->pkt_time = 0;
		in_pktp->pkt_resid = 0;
		in_pktp->pkt_state = 0;
		in_pktp->pkt_statistics = 0;
		in_pktp->pkt_reason = 0;
		in_pktp->pkt_dma_offset = 0;
		in_pktp->pkt_dma_len = 0;
		in_pktp->pkt_dma_flags = 0;
		in_pktp->pkt_path_instance = 0;
		ASSERT(in_pktp->pkt_numcookies == 0);
		pktw->pcw_curwin = 0;
		pktw->pcw_totalwin = 0;
		pktw->pcw_total_xfer = 0;

		in_pktp->pkt_cdblen = cmdlen;
		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
		    (cmdlen > DEFAULT_CDBLEN)) {
			pktw->pcw_flags |= PCW_NEED_EXT_CDB;
			in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
			if (in_pktp->pkt_cdbp == NULL)
				goto fail2;
		}
		in_pktp->pkt_tgtlen = pplen;
		if (pplen > DEFAULT_PRIVLEN) {
			pktw->pcw_flags |= PCW_NEED_EXT_TGT;
			in_pktp->pkt_private = kmem_alloc(pplen, kf);
			if (in_pktp->pkt_private == NULL)
				goto fail3;
		}
		in_pktp->pkt_scblen = statuslen;
		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
		    (statuslen > DEFAULT_SCBLEN)) {
			pktw->pcw_flags |= PCW_NEED_EXT_SCB;
			in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
			if (in_pktp->pkt_scbp == NULL)
				goto fail4;
		}
		if ((*tranp->tran_setup_pkt) (in_pktp,
		    func, NULL) == -1) {
			goto fail5;
		}
		if (cmdlen)
			bzero((void *)in_pktp->pkt_cdbp, cmdlen);
		if (pplen)
			bzero((void *)in_pktp->pkt_private, pplen);
		if (statuslen)
			bzero((void *)in_pktp->pkt_scbp, statuslen);
	} else
		pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;

	if (bp && bp->b_bcount) {

		int dma_flags = 0;

		/*
		 * we need to transfer data, so we allocate DMA
		 * resources for this packet
		 */
		/*CONSTCOND*/
		ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
		/*CONSTCOND*/
		ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);

#if defined(_DMA_USES_PHYSADDR)
		/*
		 * with an IOMMU we map everything, so we don't
		 * need to bother with this
		 */
		if (tranp->tran_dma_attr.dma_attr_granular !=
		    pktw->pcw_granular) {

			ddi_dma_free_handle(&in_pktp->pkt_handle);
			if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
			    &tranp->tran_dma_attr,
			    func, NULL,
			    &in_pktp->pkt_handle) != DDI_SUCCESS) {

				in_pktp->pkt_handle = NULL;
				return (NULL);
			}
			pktw->pcw_granular =
			    tranp->tran_dma_attr.dma_attr_granular;
		}
#endif

		if (in_pktp->pkt_numcookies == 0) {
			pktw->pcw_bp = bp;
			/*
			 * set dma flags; the "read" case must be first
			 * since B_WRITE isn't always set for writes.
			 */
			if (bp->b_flags & B_READ) {
				dma_flags |= DDI_DMA_READ;
			} else {
				dma_flags |= DDI_DMA_WRITE;
			}
			if (flags & PKT_CONSISTENT)
				dma_flags |= DDI_DMA_CONSISTENT;
			if (flags & PKT_DMA_PARTIAL)
				dma_flags |= DDI_DMA_PARTIAL;

#if defined(__sparc)
			/*
			 * workaround for byte hole issue on psycho and
			 * schizo pre 2.1
			 */
			if ((bp->b_flags & B_READ) && ((bp->b_flags &
			    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
			    (((uintptr_t)bp->b_un.b_addr & 0x7) ||
			    ((uintptr_t)bp->b_bcount & 0x7))) {
				dma_flags |= DDI_DMA_CONSISTENT;
			}
#endif
			if (!scsi_dma_buf_bind_attr(pktw, bp,
			    dma_flags, callback, callback_arg)) {
				return (NULL);
			} else {
				pktw->pcw_flags |= PCW_BOUND;
			}
		}

#if defined(_DMA_USES_PHYSADDR)
		if (!scsi_dmaget_attr(pktw)) {
			scsi_dmafree_attr(in_pktp);
			goto fail5;
		}
#else
		in_pktp->pkt_cookies = &pktw->pcw_cookie;
		in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
		pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
#endif
		ASSERT(in_pktp->pkt_numcookies <=
		    tranp->tran_dma_attr.dma_attr_sgllen);
		ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
		in_pktp->pkt_resid = bp->b_bcount -
		    pktw->pcw_total_xfer;

		ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
		    0);
	} else {
		/* !bp or no b_bcount */
		in_pktp->pkt_resid = 0;
	}
	return (in_pktp);

fail5:
	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
		kmem_free(in_pktp->pkt_scbp, statuslen);
		in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
		    tranp->tran_hba_len + DEFAULT_PRIVLEN +
		    sizeof (struct scsi_pkt_cache_wrapper));
		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
			in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
			    DEFAULT_CDBLEN);
		in_pktp->pkt_scblen = 0;
	}
fail4:
	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
		kmem_free(in_pktp->pkt_private, pplen);
		in_pktp->pkt_tgtlen = 0;
		in_pktp->pkt_private = NULL;
	}
fail3:
	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
		kmem_free(in_pktp->pkt_cdbp, cmdlen);
		in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
		    tranp->tran_hba_len +
		    sizeof (struct scsi_pkt_cache_wrapper));
		in_pktp->pkt_cdblen = 0;
	}
	pktw->pcw_flags &=
	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
fail2:
	kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
fail1:
	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
		ddi_set_callback(callback, callback_arg,
		    &scsi_callback_id);
	}

	return (NULL);
}

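/*
 * tran_destroy_pkt(9E) counterpart of scsi_init_cache_pkt(): tear
 * down HBA per-packet state, unbind any DMA resources, free any
 * externally allocated cdb/private/status space (restoring the
 * pointers into the cached packet), and return the wrapper to the
 * kmem cache.
 */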
void
scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
{
	struct scsi_pkt_cache_wrapper *pktw;

	(*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
	pktw = (struct scsi_pkt_cache_wrapper *)pktp;
	if (pktw->pcw_flags & PCW_BOUND)
		scsi_dmafree_attr(pktp);

	/*
	 * if we allocated memory for anything that wouldn't fit, free
	 * the memory and restore the pointers
	 */
	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
		kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
		pktp->pkt_scbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
			pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
			    DEFAULT_CDBLEN);
		pktp->pkt_scblen = 0;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
		kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
		pktp->pkt_tgtlen = 0;
		pktp->pkt_private = NULL;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
		kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
		pktp->pkt_cdbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    sizeof (struct scsi_pkt_cache_wrapper));
		pktp->pkt_cdblen = 0;
	}
	pktw->pcw_flags &=
	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
	kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}

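/*
 * scsi_init_pkt(9F): allocate and initialize a scsi_pkt on behalf of
 * a target driver, delegating the real work to the HBA's
 * tran_init_pkt(9E) entry point.  On failure, a restart callback may
 * be queued so a non-sleeping caller can retry later.
 *
 * A minimal sketch of the usual cycle (names are illustrative):
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 *	(void) scsi_transport(pkt);
 *	...
 *	scsi_destroy_pkt(pkt);
 */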
struct scsi_pkt *
scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
	struct scsi_pkt *pktp;
	scsi_hba_tran_t *tranp = ap->a_hba_tran;
	int (*func)(caddr_t);

	TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
"scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
	    ap, in_pktp, cmdlen, statuslen, pplen);

#if defined(__i386) || defined(__amd64)
	if (flags & PKT_CONSISTENT_OLD) {
		flags &= ~PKT_CONSISTENT_OLD;
		flags |= PKT_CONSISTENT;
	}
#endif

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
	    statuslen, pplen, flags, func, NULL);
	if (pktp == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, callback_arg,
			    &scsi_callback_id);
		}
	}

	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
	    "scsi_init_pkt_end: pktp %p", pktp);
	return (pktp);
}

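/*
 * scsi_destroy_pkt(9F): free all resources held by the packet via the
 * HBA's tran_destroy_pkt(9E), then run any pending resource callback.
 */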
void
scsi_destroy_pkt(struct scsi_pkt *pkt)
{
	struct scsi_address *ap = P_TO_ADDR(pkt);

	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
	    "scsi_destroy_pkt_start: pkt %p", pkt);

	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
	    "scsi_destroy_pkt_end");
}


/*
 * Generic Resource Allocation Routines
 */

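/*
 * scsi_resalloc(), scsi_pktalloc() and scsi_dmaget() are obsolete
 * predecessors of scsi_init_pkt(); they are kept for compatibility
 * and simply wrap the HBA's tran_init_pkt(9E) entry point.
 */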
struct scsi_pkt *
scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    opaque_t dmatoken, int (*callback)())
{
	register struct scsi_pkt *pkt;
	register scsi_hba_tran_t *tranp = ap->a_hba_tran;
	register int (*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
	    cmdlen, statuslen, 0, 0, func, NULL);
	if (pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (pkt);
}

struct scsi_pkt *
scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    int (*callback)())
{
	struct scsi_pkt *pkt;
	struct scsi_hba_tran *tran = ap->a_hba_tran;
	register int (*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
	    statuslen, 0, 0, func, NULL);
	if (pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (pkt);
}

struct scsi_pkt *
scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
{
	struct scsi_pkt *new_pkt;
	register int (*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
	    pkt, (struct buf *)dmatoken,
	    0, 0, 0, 0, func, NULL);
	ASSERT(new_pkt == pkt || new_pkt == NULL);
	if (new_pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (new_pkt);
}


/*
 * Generic Resource Deallocation Routines
 */

void
scsi_dmafree(struct scsi_pkt *pkt)
{
	register struct scsi_address *ap = P_TO_ADDR(pkt);

	(*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}

/*ARGSUSED*/
void
scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	ASSERT(pkt->pkt_numcookies == 0 ||
	    ((struct scsi_pkt_cache_wrapper *)pkt)->pcw_flags & PCW_BOUND);
	ASSERT(pkt->pkt_handle != NULL);
	scsi_dmafree_attr(pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}

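/*
 * scsi_sync_pkt(9F): synchronize the CPU's and the device's view of
 * the packet's DMA memory; a no-op unless data was actually
 * transferred (STATE_XFERRED_DATA).
 */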
void
scsi_sync_pkt(struct scsi_pkt *pkt)
{
	register struct scsi_address *ap = P_TO_ADDR(pkt);

	if (pkt->pkt_state & STATE_XFERRED_DATA)
		(*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
}

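/*
 * tran_sync_pkt(9E) implementation for cache-allocated packets: sync
 * for the device before a write, for the CPU after a read.
 */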
/*ARGSUSED*/
void
scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	if (pkt->pkt_handle &&
	    (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
		(void) ddi_dma_sync(pkt->pkt_handle,
		    pkt->pkt_dma_offset, pkt->pkt_dma_len,
		    (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

void
scsi_resfree(struct scsi_pkt *pkt)
{
	register struct scsi_address *ap = P_TO_ADDR(pkt);
	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}