1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate /*
30*0Sstevel@tonic-gate  * hci1394_ixl_comp.c
31*0Sstevel@tonic-gate  *    Isochronous IXL Compiler.
32*0Sstevel@tonic-gate  *    The compiler converts the general hardware independent IXL command
33*0Sstevel@tonic-gate  *    blocks into OpenHCI DMA descriptors.
34*0Sstevel@tonic-gate  */
35*0Sstevel@tonic-gate 
36*0Sstevel@tonic-gate #include <sys/kmem.h>
37*0Sstevel@tonic-gate #include <sys/types.h>
38*0Sstevel@tonic-gate #include <sys/conf.h>
39*0Sstevel@tonic-gate #include <sys/ddi.h>
40*0Sstevel@tonic-gate #include <sys/sunddi.h>
41*0Sstevel@tonic-gate 
42*0Sstevel@tonic-gate #include <sys/tnf_probe.h>
43*0Sstevel@tonic-gate 
44*0Sstevel@tonic-gate #include <sys/1394/h1394.h>
45*0Sstevel@tonic-gate #include <sys/1394/ixl1394.h>
46*0Sstevel@tonic-gate #include <sys/1394/adapters/hci1394.h>
47*0Sstevel@tonic-gate 
48*0Sstevel@tonic-gate /* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
49*0Sstevel@tonic-gate #define	HCI1394_IXL_PAGESIZE	8000
50*0Sstevel@tonic-gate 
51*0Sstevel@tonic-gate /* invalid opcode */
52*0Sstevel@tonic-gate #define	IXL1394_OP_INVALID  (0 | IXL1394_OPTY_OTHER)
53*0Sstevel@tonic-gate 
54*0Sstevel@tonic-gate /*
55*0Sstevel@tonic-gate  * maximum number of interrupts permitted for a single context in which
56*0Sstevel@tonic-gate  * the context does not advance to the next DMA descriptor.  Interrupts are
57*0Sstevel@tonic-gate  * triggered by 1) hardware completing a DMA descriptor block which has the
58*0Sstevel@tonic-gate  * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
59*0Sstevel@tonic-gate  * interrupt.  Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
60*0Sstevel@tonic-gate  * returned.
61*0Sstevel@tonic-gate  */
62*0Sstevel@tonic-gate int hci1394_ixl_max_noadv_intrs = 8;
63*0Sstevel@tonic-gate 
64*0Sstevel@tonic-gate 
65*0Sstevel@tonic-gate static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
66*0Sstevel@tonic-gate     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
67*0Sstevel@tonic-gate     ixl1394_command_t *ixlp);
68*0Sstevel@tonic-gate static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
69*0Sstevel@tonic-gate static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
70*0Sstevel@tonic-gate     ixl1394_command_t *ixlp);
71*0Sstevel@tonic-gate static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
72*0Sstevel@tonic-gate static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
73*0Sstevel@tonic-gate static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
74*0Sstevel@tonic-gate static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
75*0Sstevel@tonic-gate static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
76*0Sstevel@tonic-gate static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
77*0Sstevel@tonic-gate static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
78*0Sstevel@tonic-gate static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
79*0Sstevel@tonic-gate static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
80*0Sstevel@tonic-gate     caddr_t *dma_descpp, uint32_t *dma_desc_bound);
81*0Sstevel@tonic-gate static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
82*0Sstevel@tonic-gate static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
83*0Sstevel@tonic-gate static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
84*0Sstevel@tonic-gate static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
85*0Sstevel@tonic-gate     uint32_t bufp, uint16_t size);
86*0Sstevel@tonic-gate static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
87*0Sstevel@tonic-gate     uint32_t count);
88*0Sstevel@tonic-gate static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
89*0Sstevel@tonic-gate static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
90*0Sstevel@tonic-gate static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
91*0Sstevel@tonic-gate     uint32_t dmacnt);
92*0Sstevel@tonic-gate static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
93*0Sstevel@tonic-gate     uint32_t size, uint32_t *dma_bound);
94*0Sstevel@tonic-gate static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
95*0Sstevel@tonic-gate 
96*0Sstevel@tonic-gate 
97*0Sstevel@tonic-gate /*
 * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
99*0Sstevel@tonic-gate  * Receive Only:			Transmit Only:
100*0Sstevel@tonic-gate  *    IXL1394_OP_RECV_PKT_ST		    IXL1394_OP_SEND_PKT_WHDR_ST
101*0Sstevel@tonic-gate  *    IXL1394_OP_RECV_PKT		    IXL1394_OP_SEND_PKT_ST
102*0Sstevel@tonic-gate  *    IXL1394_OP_RECV_BUF		    IXL1394_OP_SEND_PKT
103*0Sstevel@tonic-gate  *    IXL1394_OP_SET_SYNCWAIT		    IXL1394_OP_SEND_BUF
104*0Sstevel@tonic-gate  *					    IXL1394_OP_SEND_HDR_ONLY
105*0Sstevel@tonic-gate  * Receive or Transmit:			    IXL1394_OP_SEND_NO_PKT
106*0Sstevel@tonic-gate  *    IXL1394_OP_CALLBACK		    IXL1394_OP_SET_TAGSYNC
107*0Sstevel@tonic-gate  *    IXL1394_OP_LABEL			    IXL1394_OP_SET_SKIPMODE
108*0Sstevel@tonic-gate  *    IXL1394_OP_JUMP			    IXL1394_OP_STORE_TIMESTAMP
109*0Sstevel@tonic-gate  */
110*0Sstevel@tonic-gate 
111*0Sstevel@tonic-gate /*
112*0Sstevel@tonic-gate  * hci1394_compile_ixl()
113*0Sstevel@tonic-gate  *    Top level ixl compiler entry point.  Scans ixl and builds openHCI 1.0
114*0Sstevel@tonic-gate  *    descriptor blocks in dma memory.
115*0Sstevel@tonic-gate  */
116*0Sstevel@tonic-gate int
hci1394_compile_ixl(hci1394_state_t * soft_statep,hci1394_iso_ctxt_t * ctxtp,ixl1394_command_t * ixlp,int * resultp)117*0Sstevel@tonic-gate hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
118*0Sstevel@tonic-gate     ixl1394_command_t *ixlp, int *resultp)
119*0Sstevel@tonic-gate {
120*0Sstevel@tonic-gate 	hci1394_comp_ixl_vars_t wv;	/* working variables used throughout */
121*0Sstevel@tonic-gate 
122*0Sstevel@tonic-gate 	ASSERT(soft_statep != NULL);
123*0Sstevel@tonic-gate 	ASSERT(ctxtp != NULL);
124*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_enter,
125*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
126*0Sstevel@tonic-gate 
127*0Sstevel@tonic-gate 	/* Initialize compiler working variables */
128*0Sstevel@tonic-gate 	hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
129*0Sstevel@tonic-gate 
130*0Sstevel@tonic-gate 	/*
131*0Sstevel@tonic-gate 	 * First pass:
132*0Sstevel@tonic-gate 	 *    Parse ixl commands, building desc blocks, until end of IXL
133*0Sstevel@tonic-gate 	 *    linked list.
134*0Sstevel@tonic-gate 	 */
135*0Sstevel@tonic-gate 	hci1394_parse_ixl(&wv, ixlp);
136*0Sstevel@tonic-gate 
137*0Sstevel@tonic-gate 	/*
138*0Sstevel@tonic-gate 	 * Second pass:
139*0Sstevel@tonic-gate 	 *    Resolve all generated descriptor block jump and skip addresses.
140*0Sstevel@tonic-gate 	 *    Set interrupt enable in descriptor blocks which have callback
141*0Sstevel@tonic-gate 	 *    operations in their execution scope. (Previously store_timesamp
142*0Sstevel@tonic-gate 	 *    operations were counted also.) Set interrupt enable in descriptor
143*0Sstevel@tonic-gate 	 *    blocks which were introduced by an ixl label command.
144*0Sstevel@tonic-gate 	 */
145*0Sstevel@tonic-gate 	if (wv.dma_bld_error == 0) {
146*0Sstevel@tonic-gate 		hci1394_finalize_all_xfer_desc(&wv);
147*0Sstevel@tonic-gate 	}
148*0Sstevel@tonic-gate 
149*0Sstevel@tonic-gate 	/* Endup: finalize and cleanup ixl compile, return result */
150*0Sstevel@tonic-gate 	hci1394_compile_ixl_endup(&wv);
151*0Sstevel@tonic-gate 
152*0Sstevel@tonic-gate 	*resultp = wv.dma_bld_error;
153*0Sstevel@tonic-gate 	if (*resultp != 0) {
154*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
155*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
156*0Sstevel@tonic-gate 		return (DDI_FAILURE);
157*0Sstevel@tonic-gate 	} else {
158*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
159*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
160*0Sstevel@tonic-gate 		return (DDI_SUCCESS);
161*0Sstevel@tonic-gate 	}
162*0Sstevel@tonic-gate }
163*0Sstevel@tonic-gate 
164*0Sstevel@tonic-gate /*
165*0Sstevel@tonic-gate  * hci1394_compile_ixl_init()
166*0Sstevel@tonic-gate  *    Initialize the isoch context structure associated with the IXL
167*0Sstevel@tonic-gate  *    program, and initialize the temporary working variables structure.
168*0Sstevel@tonic-gate  */
169*0Sstevel@tonic-gate static void
hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t * wvp,hci1394_state_t * soft_statep,hci1394_iso_ctxt_t * ctxtp,ixl1394_command_t * ixlp)170*0Sstevel@tonic-gate hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
171*0Sstevel@tonic-gate     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
172*0Sstevel@tonic-gate     ixl1394_command_t *ixlp)
173*0Sstevel@tonic-gate {
174*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_enter,
175*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
176*0Sstevel@tonic-gate 
177*0Sstevel@tonic-gate 	/* initialize common recv/xmit compile values */
178*0Sstevel@tonic-gate 	wvp->soft_statep = soft_statep;
179*0Sstevel@tonic-gate 	wvp->ctxtp = ctxtp;
180*0Sstevel@tonic-gate 
181*0Sstevel@tonic-gate 	/* init/clear ctxtp values */
182*0Sstevel@tonic-gate 	ctxtp->dma_mem_execp = NULL;
183*0Sstevel@tonic-gate 	ctxtp->dma_firstp = NULL;
184*0Sstevel@tonic-gate 	ctxtp->dma_last_time = 0;
185*0Sstevel@tonic-gate 	ctxtp->xcs_firstp = NULL;
186*0Sstevel@tonic-gate 	ctxtp->ixl_exec_depth = 0;
187*0Sstevel@tonic-gate 	ctxtp->ixl_execp = NULL;
188*0Sstevel@tonic-gate 	ctxtp->ixl_firstp = ixlp;
189*0Sstevel@tonic-gate 	ctxtp->default_skipxferp = NULL;
190*0Sstevel@tonic-gate 
191*0Sstevel@tonic-gate 	/*
192*0Sstevel@tonic-gate 	 * the context's max_noadv_intrs is set here instead of in isoch init
193*0Sstevel@tonic-gate 	 * because the default is patchable and would only be picked up this way
194*0Sstevel@tonic-gate 	 */
195*0Sstevel@tonic-gate 	ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
196*0Sstevel@tonic-gate 
197*0Sstevel@tonic-gate 	/* init working variables */
198*0Sstevel@tonic-gate 	wvp->xcs_firstp = NULL;
199*0Sstevel@tonic-gate 	wvp->xcs_currentp = NULL;
200*0Sstevel@tonic-gate 
201*0Sstevel@tonic-gate 	wvp->dma_firstp = NULL;
202*0Sstevel@tonic-gate 	wvp->dma_currentp = NULL;
203*0Sstevel@tonic-gate 	wvp->dma_bld_error = 0;
204*0Sstevel@tonic-gate 
205*0Sstevel@tonic-gate 	wvp->ixl_io_mode = ctxtp->ctxt_flags;
206*0Sstevel@tonic-gate 	wvp->ixl_cur_cmdp = NULL;
207*0Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp = NULL;
208*0Sstevel@tonic-gate 	wvp->ixl_cur_labelp = NULL;
209*0Sstevel@tonic-gate 
210*0Sstevel@tonic-gate 	wvp->ixl_xfer_st_cnt = 0;	/* count of xfer start commands found */
211*0Sstevel@tonic-gate 	wvp->xfer_state = XFER_NONE;	/* none, pkt, buf, skip, hdronly */
212*0Sstevel@tonic-gate 	wvp->xfer_hci_flush = 0;	/* updateable - xfer, jump, set */
213*0Sstevel@tonic-gate 	wvp->xfer_pktlen = 0;
214*0Sstevel@tonic-gate 	wvp->xfer_bufcnt = 0;
215*0Sstevel@tonic-gate 	wvp->descriptors = 0;
216*0Sstevel@tonic-gate 
217*0Sstevel@tonic-gate 	/* START RECV ONLY SECTION */
218*0Sstevel@tonic-gate 	wvp->ixl_setsyncwait_cnt = 0;
219*0Sstevel@tonic-gate 
220*0Sstevel@tonic-gate 	/* START XMIT ONLY SECTION */
221*0Sstevel@tonic-gate 	wvp->ixl_settagsync_cmdp = NULL;
222*0Sstevel@tonic-gate 	wvp->ixl_setskipmode_cmdp = NULL;
223*0Sstevel@tonic-gate 	wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
224*0Sstevel@tonic-gate 	wvp->default_skiplabelp = ctxtp->default_skiplabelp;
225*0Sstevel@tonic-gate 	wvp->default_skipxferp = NULL;
226*0Sstevel@tonic-gate 	wvp->skipmode = ctxtp->default_skipmode;
227*0Sstevel@tonic-gate 	wvp->skiplabelp = NULL;
228*0Sstevel@tonic-gate 	wvp->skipxferp = NULL;
229*0Sstevel@tonic-gate 	wvp->default_tag = ctxtp->default_tag;
230*0Sstevel@tonic-gate 	wvp->default_sync = ctxtp->default_sync;
231*0Sstevel@tonic-gate 	wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
232*0Sstevel@tonic-gate 	wvp->storevalue_data = 0;
233*0Sstevel@tonic-gate 	wvp->xmit_pkthdr1 = 0;
234*0Sstevel@tonic-gate 	wvp->xmit_pkthdr2 = 0;
235*0Sstevel@tonic-gate 	/* END XMIT ONLY SECTION */
236*0Sstevel@tonic-gate 
237*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_exit,
238*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
239*0Sstevel@tonic-gate }
240*0Sstevel@tonic-gate 
241*0Sstevel@tonic-gate /*
242*0Sstevel@tonic-gate  * hci1394_compile_ixl_endup()
243*0Sstevel@tonic-gate  *    This routine is called just before the main hci1394_compile_ixl() exits.
244*0Sstevel@tonic-gate  *    It checks for errors and performs the appropriate cleanup, or it rolls any
245*0Sstevel@tonic-gate  *    relevant info from the working variables struct into the context structure
246*0Sstevel@tonic-gate  */
247*0Sstevel@tonic-gate static void
hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t * wvp)248*0Sstevel@tonic-gate hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
249*0Sstevel@tonic-gate {
250*0Sstevel@tonic-gate 	ixl1394_command_t *ixl_exec_stp;
251*0Sstevel@tonic-gate 	hci1394_idma_desc_mem_t *dma_nextp;
252*0Sstevel@tonic-gate 	int err;
253*0Sstevel@tonic-gate 
254*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_enter,
255*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
256*0Sstevel@tonic-gate 
257*0Sstevel@tonic-gate 	/* error if no descriptor blocks found in ixl & created in dma memory */
258*0Sstevel@tonic-gate 	if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
259*0Sstevel@tonic-gate 		TNF_PROBE_1(hci1394_compile_ixl_endup_nodata_error,
260*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
261*0Sstevel@tonic-gate 		    "IXL1394_ENO_DATA_PKTS: prog has no data packets");
262*0Sstevel@tonic-gate 
263*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
264*0Sstevel@tonic-gate 	}
265*0Sstevel@tonic-gate 
266*0Sstevel@tonic-gate 	/* if no errors yet, find the first IXL command that's a transfer cmd */
267*0Sstevel@tonic-gate 	if (wvp->dma_bld_error == 0) {
268*0Sstevel@tonic-gate 		err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
269*0Sstevel@tonic-gate 		    NULL, &ixl_exec_stp);
270*0Sstevel@tonic-gate 
271*0Sstevel@tonic-gate 		/* error if a label<->jump loop, or no xfer */
272*0Sstevel@tonic-gate 		if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
273*0Sstevel@tonic-gate 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
274*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
275*0Sstevel@tonic-gate 			    "IXL1394_ENO_DATA_PKTS: loop or no xfer detected");
276*0Sstevel@tonic-gate 
277*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
278*0Sstevel@tonic-gate 		}
279*0Sstevel@tonic-gate 	}
280*0Sstevel@tonic-gate 
281*0Sstevel@tonic-gate 	/* Sync all the DMA descriptor buffers */
282*0Sstevel@tonic-gate 	dma_nextp = wvp->ctxtp->dma_firstp;
283*0Sstevel@tonic-gate 	while (dma_nextp != NULL) {
284*0Sstevel@tonic-gate 		err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
285*0Sstevel@tonic-gate 		    (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
286*0Sstevel@tonic-gate 		    DDI_DMA_SYNC_FORDEV);
287*0Sstevel@tonic-gate 		if (err != DDI_SUCCESS) {
288*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
289*0Sstevel@tonic-gate 
290*0Sstevel@tonic-gate 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
291*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
292*0Sstevel@tonic-gate 			    "IXL1394_INTERNAL_ERROR: dma_sync() failed");
293*0Sstevel@tonic-gate 			break;
294*0Sstevel@tonic-gate 		}
295*0Sstevel@tonic-gate 
296*0Sstevel@tonic-gate 		/* advance to next dma memory descriptor */
297*0Sstevel@tonic-gate 		dma_nextp = dma_nextp->dma_nextp;
298*0Sstevel@tonic-gate 	}
299*0Sstevel@tonic-gate 
300*0Sstevel@tonic-gate 	/*
301*0Sstevel@tonic-gate 	 * If error, cleanup and return. delete all allocated xfer_ctl structs
302*0Sstevel@tonic-gate 	 * and all dma descriptor page memory and its dma memory blocks too.
303*0Sstevel@tonic-gate 	 */
304*0Sstevel@tonic-gate 	if (wvp->dma_bld_error != 0) {
305*0Sstevel@tonic-gate 		wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
306*0Sstevel@tonic-gate 		wvp->ctxtp->dma_firstp = wvp->dma_firstp;
307*0Sstevel@tonic-gate 		hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
308*0Sstevel@tonic-gate 
309*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
310*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
311*0Sstevel@tonic-gate 		return;
312*0Sstevel@tonic-gate 	}
313*0Sstevel@tonic-gate 
314*0Sstevel@tonic-gate 	/* can only get to here if the first ixl transfer command is found */
315*0Sstevel@tonic-gate 
316*0Sstevel@tonic-gate 	/* set required processing vars into ctxtp struct */
317*0Sstevel@tonic-gate 	wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
318*0Sstevel@tonic-gate 	wvp->ctxtp->dma_mem_execp = 0;
319*0Sstevel@tonic-gate 
320*0Sstevel@tonic-gate 	/*
321*0Sstevel@tonic-gate 	 * the transfer command's compiler private xfer_ctl structure has the
322*0Sstevel@tonic-gate 	 * appropriate bound address
323*0Sstevel@tonic-gate 	 */
324*0Sstevel@tonic-gate 	wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
325*0Sstevel@tonic-gate 	    ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
326*0Sstevel@tonic-gate 	wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
327*0Sstevel@tonic-gate 	wvp->ctxtp->dma_firstp = wvp->dma_firstp;
328*0Sstevel@tonic-gate 	wvp->ctxtp->dma_last_time = 0;
329*0Sstevel@tonic-gate 	wvp->ctxtp->ixl_exec_depth = 0;
330*0Sstevel@tonic-gate 	wvp->ctxtp->ixl_execp = NULL;
331*0Sstevel@tonic-gate 
332*0Sstevel@tonic-gate 	/* compile done */
333*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
334*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
335*0Sstevel@tonic-gate }
336*0Sstevel@tonic-gate 
337*0Sstevel@tonic-gate /*
338*0Sstevel@tonic-gate  * hci1394_parse_ixl()
339*0Sstevel@tonic-gate  *    Scan IXL program and build ohci DMA descriptor blocks in dma memory.
340*0Sstevel@tonic-gate  *
341*0Sstevel@tonic-gate  *    Parse/process succeeding ixl commands until end of IXL linked list is
342*0Sstevel@tonic-gate  *    reached. Evaluate ixl syntax and build (xmit or recv) descriptor
343*0Sstevel@tonic-gate  *    blocks.  To aid execution time evaluation of current location, enable
344*0Sstevel@tonic-gate  *    status recording on each descriptor block built.
345*0Sstevel@tonic-gate  *    On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
346*0Sstevel@tonic-gate  */
347*0Sstevel@tonic-gate static void
hci1394_parse_ixl(hci1394_comp_ixl_vars_t * wvp,ixl1394_command_t * ixlp)348*0Sstevel@tonic-gate hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
349*0Sstevel@tonic-gate {
350*0Sstevel@tonic-gate 	ixl1394_command_t *ixlnextp = ixlp;	/* addr of next ixl cmd */
351*0Sstevel@tonic-gate 	ixl1394_command_t *ixlcurp = NULL;	/* addr of current ixl cmd */
352*0Sstevel@tonic-gate 	uint16_t ixlopcode = 0;			/* opcode of currnt ixl cmd */
353*0Sstevel@tonic-gate 
354*0Sstevel@tonic-gate 	uint32_t pktsize;
355*0Sstevel@tonic-gate 	uint32_t pktcnt;
356*0Sstevel@tonic-gate 
357*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_enter, HCI1394_TNF_HAL_STACK_ISOCH,
358*0Sstevel@tonic-gate 	    "");
359*0Sstevel@tonic-gate 
360*0Sstevel@tonic-gate 	/* follow ixl links until reach end or find error */
361*0Sstevel@tonic-gate 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
362*0Sstevel@tonic-gate 
363*0Sstevel@tonic-gate 		/* set this command as the current ixl command */
364*0Sstevel@tonic-gate 		wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
365*0Sstevel@tonic-gate 		ixlnextp = ixlcurp->next_ixlp;
366*0Sstevel@tonic-gate 
367*0Sstevel@tonic-gate 		ixlopcode = ixlcurp->ixl_opcode;
368*0Sstevel@tonic-gate 
369*0Sstevel@tonic-gate 		/* init compiler controlled values in current ixl command */
370*0Sstevel@tonic-gate 		ixlcurp->compiler_privatep = NULL;
371*0Sstevel@tonic-gate 		ixlcurp->compiler_resv = 0;
372*0Sstevel@tonic-gate 
373*0Sstevel@tonic-gate 		/* error if xmit/recv mode not appropriate for current cmd */
374*0Sstevel@tonic-gate 		if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
375*0Sstevel@tonic-gate 			((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
376*0Sstevel@tonic-gate 		    (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
377*0Sstevel@tonic-gate 			((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
378*0Sstevel@tonic-gate 
379*0Sstevel@tonic-gate 			/* check if command op failed because it was invalid */
380*0Sstevel@tonic-gate 			if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
381*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
382*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
383*0Sstevel@tonic-gate 				    errmsg, "IXL1394_BAD_IXL_OPCODE",
384*0Sstevel@tonic-gate 				    tnf_opaque, ixl_commandp, ixlcurp,
385*0Sstevel@tonic-gate 				    tnf_opaque, ixl_opcode, ixlopcode);
386*0Sstevel@tonic-gate 
387*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
388*0Sstevel@tonic-gate 			} else {
389*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_mode_error,
390*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
391*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
392*0Sstevel@tonic-gate 				    "invalid ixlop in mode", tnf_uint, io_mode,
393*0Sstevel@tonic-gate 				    wvp->ixl_io_mode, tnf_opaque, ixl_opcode,
394*0Sstevel@tonic-gate 				    ixlopcode);
395*0Sstevel@tonic-gate 
396*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
397*0Sstevel@tonic-gate 			}
398*0Sstevel@tonic-gate 			continue;
399*0Sstevel@tonic-gate 		}
400*0Sstevel@tonic-gate 
401*0Sstevel@tonic-gate 		/*
402*0Sstevel@tonic-gate 		 * if ends xfer flag set, finalize current xfer descriptor
403*0Sstevel@tonic-gate 		 * block build
404*0Sstevel@tonic-gate 		 */
405*0Sstevel@tonic-gate 		if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
406*0Sstevel@tonic-gate 			/* finalize any descriptor block build in progress */
407*0Sstevel@tonic-gate 			hci1394_finalize_cur_xfer_desc(wvp);
408*0Sstevel@tonic-gate 
409*0Sstevel@tonic-gate 			if (wvp->dma_bld_error != 0) {
410*0Sstevel@tonic-gate 				continue;
411*0Sstevel@tonic-gate 			}
412*0Sstevel@tonic-gate 		}
413*0Sstevel@tonic-gate 
414*0Sstevel@tonic-gate 		/*
415*0Sstevel@tonic-gate 		 * now process based on specific opcode value
416*0Sstevel@tonic-gate 		 */
417*0Sstevel@tonic-gate 		switch (ixlopcode) {
418*0Sstevel@tonic-gate 
419*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_BUF:
420*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_BUF_U: {
421*0Sstevel@tonic-gate 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
422*0Sstevel@tonic-gate 
423*0Sstevel@tonic-gate 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
424*0Sstevel@tonic-gate 
425*0Sstevel@tonic-gate 			/*
426*0Sstevel@tonic-gate 			 * In packet-per-buffer mode:
427*0Sstevel@tonic-gate 			 *    This ixl command builds a collection of xfer
428*0Sstevel@tonic-gate 			 *    descriptor blocks (size/pkt_size of them) each to
429*0Sstevel@tonic-gate 			 *    recv a packet whose buffer size is pkt_size and
430*0Sstevel@tonic-gate 			 *    whose buffer ptr is (pktcur*pkt_size + bufp)
431*0Sstevel@tonic-gate 			 *
432*0Sstevel@tonic-gate 			 * In buffer fill mode:
433*0Sstevel@tonic-gate 			 *    This ixl command builds a single xfer descriptor
434*0Sstevel@tonic-gate 			 *    block to recv as many packets or parts of packets
435*0Sstevel@tonic-gate 			 *    as can fit into the buffer size specified
436*0Sstevel@tonic-gate 			 *    (pkt_size is not used).
437*0Sstevel@tonic-gate 			 */
438*0Sstevel@tonic-gate 
439*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
440*0Sstevel@tonic-gate 			wvp->xfer_state = XFER_BUF;
441*0Sstevel@tonic-gate 
442*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
443*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
444*0Sstevel@tonic-gate 
445*0Sstevel@tonic-gate 			/*
446*0Sstevel@tonic-gate 			 * perform packet-per-buffer checks
447*0Sstevel@tonic-gate 			 * (no checks needed when in buffer fill mode)
448*0Sstevel@tonic-gate 			 */
449*0Sstevel@tonic-gate 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
450*0Sstevel@tonic-gate 
451*0Sstevel@tonic-gate 				/* the packets must use the buffer exactly */
452*0Sstevel@tonic-gate 				pktsize = cur_xfer_buf_ixlp->pkt_size;
453*0Sstevel@tonic-gate 				pktcnt = 0;
454*0Sstevel@tonic-gate 				if (pktsize != 0) {
455*0Sstevel@tonic-gate 					pktcnt = cur_xfer_buf_ixlp->size /
456*0Sstevel@tonic-gate 					    pktsize;
457*0Sstevel@tonic-gate 				}
458*0Sstevel@tonic-gate 				if ((pktcnt == 0) || ((pktsize * pktcnt) !=
459*0Sstevel@tonic-gate 				    cur_xfer_buf_ixlp->size)) {
460*0Sstevel@tonic-gate 
461*0Sstevel@tonic-gate 					TNF_PROBE_3(hci1394_parse_ixl_rat_error,
462*0Sstevel@tonic-gate 					    HCI1394_TNF_HAL_ERROR_ISOCH, "",
463*0Sstevel@tonic-gate 					    tnf_string, errmsg,
464*0Sstevel@tonic-gate 					    "IXL1394_EPKTSIZE_RATIO", tnf_int,
465*0Sstevel@tonic-gate 					    buf_size, cur_xfer_buf_ixlp->size,
466*0Sstevel@tonic-gate 					    tnf_int, pkt_size, pktsize);
467*0Sstevel@tonic-gate 
468*0Sstevel@tonic-gate 					wvp->dma_bld_error =
469*0Sstevel@tonic-gate 					    IXL1394_EPKTSIZE_RATIO;
470*0Sstevel@tonic-gate 					continue;
471*0Sstevel@tonic-gate 				}
472*0Sstevel@tonic-gate 			}
473*0Sstevel@tonic-gate 
474*0Sstevel@tonic-gate 			/*
475*0Sstevel@tonic-gate 			 * set buffer pointer & size into first xfer_bufp
476*0Sstevel@tonic-gate 			 * and xfer_size
477*0Sstevel@tonic-gate 			 */
478*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
479*0Sstevel@tonic-gate 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
480*0Sstevel@tonic-gate 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
481*0Sstevel@tonic-gate 
482*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
483*0Sstevel@tonic-gate 				continue;
484*0Sstevel@tonic-gate 			}
485*0Sstevel@tonic-gate 			break;
486*0Sstevel@tonic-gate 		}
487*0Sstevel@tonic-gate 
488*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_PKT_ST:
489*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_PKT_ST_U: {
490*0Sstevel@tonic-gate 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
491*0Sstevel@tonic-gate 
492*0Sstevel@tonic-gate 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
493*0Sstevel@tonic-gate 
494*0Sstevel@tonic-gate 			/* error if in buffer fill mode */
495*0Sstevel@tonic-gate 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
496*0Sstevel@tonic-gate 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
497*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
498*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
499*0Sstevel@tonic-gate 				    "RECV_PKT_ST used in BFFILL mode");
500*0Sstevel@tonic-gate 
501*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
502*0Sstevel@tonic-gate 				continue;
503*0Sstevel@tonic-gate 			}
504*0Sstevel@tonic-gate 
505*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
506*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
507*0Sstevel@tonic-gate 			wvp->xfer_state = XFER_PKT;
508*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
509*0Sstevel@tonic-gate 
510*0Sstevel@tonic-gate 			/*
511*0Sstevel@tonic-gate 			 * set buffer pointer & size into first xfer_bufp
512*0Sstevel@tonic-gate 			 * and xfer_size
513*0Sstevel@tonic-gate 			 */
514*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
515*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
516*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
517*0Sstevel@tonic-gate 
518*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
519*0Sstevel@tonic-gate 				continue;
520*0Sstevel@tonic-gate 			}
521*0Sstevel@tonic-gate 			break;
522*0Sstevel@tonic-gate 		}
523*0Sstevel@tonic-gate 
524*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_PKT:
525*0Sstevel@tonic-gate 		case IXL1394_OP_RECV_PKT_U: {
526*0Sstevel@tonic-gate 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
527*0Sstevel@tonic-gate 
528*0Sstevel@tonic-gate 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
529*0Sstevel@tonic-gate 
530*0Sstevel@tonic-gate 			/* error if in buffer fill mode */
531*0Sstevel@tonic-gate 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
532*0Sstevel@tonic-gate 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
533*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
534*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
535*0Sstevel@tonic-gate 				    "RECV_PKT_ST used in BFFILL mode");
536*0Sstevel@tonic-gate 
537*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
538*0Sstevel@tonic-gate 				continue;
539*0Sstevel@tonic-gate 			}
540*0Sstevel@tonic-gate 
541*0Sstevel@tonic-gate 			/* error if xfer_state not xfer pkt */
542*0Sstevel@tonic-gate 			if (wvp->xfer_state != XFER_PKT) {
543*0Sstevel@tonic-gate 				TNF_PROBE_1(hci1394_parse_ixl_misplacercv_error,
544*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
545*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EMISPLACED_RECV: "
546*0Sstevel@tonic-gate 				    "RECV_PKT without RECV_PKT_ST");
547*0Sstevel@tonic-gate 
548*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
549*0Sstevel@tonic-gate 				continue;
550*0Sstevel@tonic-gate 			}
551*0Sstevel@tonic-gate 
552*0Sstevel@tonic-gate 			/*
553*0Sstevel@tonic-gate 			 * save xfer start cmd ixl ptr in compiler_privatep
554*0Sstevel@tonic-gate 			 * field of this cmd
555*0Sstevel@tonic-gate 			 */
556*0Sstevel@tonic-gate 			ixlcurp->compiler_privatep = (void *)
557*0Sstevel@tonic-gate 			    wvp->ixl_cur_xfer_stp;
558*0Sstevel@tonic-gate 
559*0Sstevel@tonic-gate 			/*
560*0Sstevel@tonic-gate 			 * save pkt index [1-n] in compiler_resv field of
561*0Sstevel@tonic-gate 			 * this cmd
562*0Sstevel@tonic-gate 			 */
563*0Sstevel@tonic-gate 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
564*0Sstevel@tonic-gate 
565*0Sstevel@tonic-gate 			/*
566*0Sstevel@tonic-gate 			 * set buffer pointer & size into next xfer_bufp
567*0Sstevel@tonic-gate 			 * and xfer_size
568*0Sstevel@tonic-gate 			 */
569*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
570*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
571*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
572*0Sstevel@tonic-gate 
573*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
574*0Sstevel@tonic-gate 				continue;
575*0Sstevel@tonic-gate 			}
576*0Sstevel@tonic-gate 
577*0Sstevel@tonic-gate 			/*
578*0Sstevel@tonic-gate 			 * set updateable xfer cache flush eval flag if
579*0Sstevel@tonic-gate 			 * updateable opcode
580*0Sstevel@tonic-gate 			 */
581*0Sstevel@tonic-gate 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
582*0Sstevel@tonic-gate 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
583*0Sstevel@tonic-gate 			}
584*0Sstevel@tonic-gate 			break;
585*0Sstevel@tonic-gate 		}
586*0Sstevel@tonic-gate 
587*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_BUF:
588*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_BUF_U: {
589*0Sstevel@tonic-gate 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
590*0Sstevel@tonic-gate 
591*0Sstevel@tonic-gate 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
592*0Sstevel@tonic-gate 
593*0Sstevel@tonic-gate 			/*
594*0Sstevel@tonic-gate 			 * These send_buf commands build a collection of xmit
595*0Sstevel@tonic-gate 			 * descriptor blocks (size/pkt_size of them) each to
596*0Sstevel@tonic-gate 			 * xfer a packet whose buffer size is pkt_size and whose
597*0Sstevel@tonic-gate 			 * buffer pt is (pktcur*pkt_size + bufp). (ptr and size
598*0Sstevel@tonic-gate 			 * are adjusted if they have header form of ixl cmd)
599*0Sstevel@tonic-gate 			 */
600*0Sstevel@tonic-gate 
601*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
602*0Sstevel@tonic-gate 			wvp->xfer_state = XFER_BUF;
603*0Sstevel@tonic-gate 
604*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
605*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
606*0Sstevel@tonic-gate 
607*0Sstevel@tonic-gate 			/* the packets must use the buffer exactly,else error */
608*0Sstevel@tonic-gate 			pktsize = cur_xfer_buf_ixlp->pkt_size;
609*0Sstevel@tonic-gate 			pktcnt = 0;
610*0Sstevel@tonic-gate 			if (pktsize != 0) {
611*0Sstevel@tonic-gate 				pktcnt = cur_xfer_buf_ixlp->size / pktsize;
612*0Sstevel@tonic-gate 			}
613*0Sstevel@tonic-gate 			if ((pktcnt == 0) || ((pktsize * pktcnt) !=
614*0Sstevel@tonic-gate 			    cur_xfer_buf_ixlp->size)) {
615*0Sstevel@tonic-gate 
616*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_rat_error,
617*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
618*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EPKTSIZE_RATIO", tnf_int,
619*0Sstevel@tonic-gate 				    buf_size, cur_xfer_buf_ixlp->size, tnf_int,
620*0Sstevel@tonic-gate 				    pkt_size, pktsize);
621*0Sstevel@tonic-gate 
622*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
623*0Sstevel@tonic-gate 				continue;
624*0Sstevel@tonic-gate 			}
625*0Sstevel@tonic-gate 
626*0Sstevel@tonic-gate 			/* set buf ptr & size into 1st xfer_bufp & xfer_size */
627*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
628*0Sstevel@tonic-gate 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
629*0Sstevel@tonic-gate 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
630*0Sstevel@tonic-gate 
631*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
632*0Sstevel@tonic-gate 				continue;
633*0Sstevel@tonic-gate 			}
634*0Sstevel@tonic-gate 			break;
635*0Sstevel@tonic-gate 		}
636*0Sstevel@tonic-gate 
637*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT_ST:
638*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT_ST_U: {
639*0Sstevel@tonic-gate 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
640*0Sstevel@tonic-gate 
641*0Sstevel@tonic-gate 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
642*0Sstevel@tonic-gate 
643*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
644*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
645*0Sstevel@tonic-gate 			wvp->xfer_state = XFER_PKT;
646*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
647*0Sstevel@tonic-gate 
648*0Sstevel@tonic-gate 			/*
649*0Sstevel@tonic-gate 			 * set buffer pointer & size into first xfer_bufp and
650*0Sstevel@tonic-gate 			 * xfer_size
651*0Sstevel@tonic-gate 			 */
652*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
653*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
654*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
655*0Sstevel@tonic-gate 
656*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
657*0Sstevel@tonic-gate 				continue;
658*0Sstevel@tonic-gate 			}
659*0Sstevel@tonic-gate 			break;
660*0Sstevel@tonic-gate 		}
661*0Sstevel@tonic-gate 
662*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT_WHDR_ST:
663*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
664*0Sstevel@tonic-gate 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
665*0Sstevel@tonic-gate 
666*0Sstevel@tonic-gate 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
667*0Sstevel@tonic-gate 
668*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
669*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
670*0Sstevel@tonic-gate 			wvp->xfer_state = XFER_PKT;
671*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
672*0Sstevel@tonic-gate 
673*0Sstevel@tonic-gate 			/*
674*0Sstevel@tonic-gate 			 * buffer size must be at least 4 (must include header),
675*0Sstevel@tonic-gate 			 * else error
676*0Sstevel@tonic-gate 			 */
677*0Sstevel@tonic-gate 			if (cur_xfer_pkt_ixlp->size < 4) {
678*0Sstevel@tonic-gate 				TNF_PROBE_2(hci1394_parse_ixl_hdr_missing_error,
679*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
680*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EPKT_HDR_MISSING", tnf_int,
681*0Sstevel@tonic-gate 				    pkt_size, cur_xfer_pkt_ixlp->size);
682*0Sstevel@tonic-gate 
683*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
684*0Sstevel@tonic-gate 				continue;
685*0Sstevel@tonic-gate 			}
686*0Sstevel@tonic-gate 
687*0Sstevel@tonic-gate 			/*
688*0Sstevel@tonic-gate 			 * set buffer and size(excluding header) into first
689*0Sstevel@tonic-gate 			 * xfer_bufp and xfer_size
690*0Sstevel@tonic-gate 			 */
691*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
692*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
693*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
694*0Sstevel@tonic-gate 
695*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
696*0Sstevel@tonic-gate 				continue;
697*0Sstevel@tonic-gate 			}
698*0Sstevel@tonic-gate 			break;
699*0Sstevel@tonic-gate 		}
700*0Sstevel@tonic-gate 
701*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT:
702*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_PKT_U: {
703*0Sstevel@tonic-gate 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
704*0Sstevel@tonic-gate 
705*0Sstevel@tonic-gate 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
706*0Sstevel@tonic-gate 
707*0Sstevel@tonic-gate 			/* error if xfer_state not xfer pkt */
708*0Sstevel@tonic-gate 			if (wvp->xfer_state != XFER_PKT) {
709*0Sstevel@tonic-gate 				TNF_PROBE_1(hci1394_parse_ixl_misplacesnd_error,
710*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
711*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EMISPLACED_SEND: SEND_PKT "
712*0Sstevel@tonic-gate 				    "without SEND_PKT_ST");
713*0Sstevel@tonic-gate 
714*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
715*0Sstevel@tonic-gate 				continue;
716*0Sstevel@tonic-gate 			}
717*0Sstevel@tonic-gate 
718*0Sstevel@tonic-gate 			/*
719*0Sstevel@tonic-gate 			 * save xfer start cmd ixl ptr in compiler_privatep
720*0Sstevel@tonic-gate 			 * field of this cmd
721*0Sstevel@tonic-gate 			 */
722*0Sstevel@tonic-gate 			ixlcurp->compiler_privatep = (void *)
723*0Sstevel@tonic-gate 			    wvp->ixl_cur_xfer_stp;
724*0Sstevel@tonic-gate 
725*0Sstevel@tonic-gate 			/*
726*0Sstevel@tonic-gate 			 * save pkt index [1-n] in compiler_resv field of this
727*0Sstevel@tonic-gate 			 * cmd
728*0Sstevel@tonic-gate 			 */
729*0Sstevel@tonic-gate 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
730*0Sstevel@tonic-gate 
731*0Sstevel@tonic-gate 			/*
732*0Sstevel@tonic-gate 			 * set buffer pointer & size into next xfer_bufp
733*0Sstevel@tonic-gate 			 * and xfer_size
734*0Sstevel@tonic-gate 			 */
735*0Sstevel@tonic-gate 			if (hci1394_set_next_xfer_buf(wvp,
736*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
737*0Sstevel@tonic-gate 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
738*0Sstevel@tonic-gate 
739*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
740*0Sstevel@tonic-gate 				continue;
741*0Sstevel@tonic-gate 			}
742*0Sstevel@tonic-gate 
743*0Sstevel@tonic-gate 			/*
744*0Sstevel@tonic-gate 			 * set updateable xfer cache flush eval flag if
745*0Sstevel@tonic-gate 			 * updateable opcode
746*0Sstevel@tonic-gate 			 */
747*0Sstevel@tonic-gate 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
748*0Sstevel@tonic-gate 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
749*0Sstevel@tonic-gate 			}
750*0Sstevel@tonic-gate 			break;
751*0Sstevel@tonic-gate 		}
752*0Sstevel@tonic-gate 
753*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_HDR_ONLY:
754*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
755*0Sstevel@tonic-gate 			wvp->xfer_state = XMIT_HDRONLY;
756*0Sstevel@tonic-gate 
757*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
758*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
759*0Sstevel@tonic-gate 			break;
760*0Sstevel@tonic-gate 
761*0Sstevel@tonic-gate 		case IXL1394_OP_SEND_NO_PKT:
762*0Sstevel@tonic-gate 			/* set xfer_state for new descriptor block build */
763*0Sstevel@tonic-gate 			wvp->xfer_state = XMIT_NOPKT;
764*0Sstevel@tonic-gate 
765*0Sstevel@tonic-gate 			/* set this ixl command as current xferstart command */
766*0Sstevel@tonic-gate 			wvp->ixl_cur_xfer_stp = ixlcurp;
767*0Sstevel@tonic-gate 			break;
768*0Sstevel@tonic-gate 
769*0Sstevel@tonic-gate 		case IXL1394_OP_JUMP:
770*0Sstevel@tonic-gate 		case IXL1394_OP_JUMP_U: {
771*0Sstevel@tonic-gate 			ixl1394_jump_t *cur_jump_ixlp;
772*0Sstevel@tonic-gate 
773*0Sstevel@tonic-gate 			cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
774*0Sstevel@tonic-gate 
775*0Sstevel@tonic-gate 			/*
776*0Sstevel@tonic-gate 			 * verify label indicated by IXL1394_OP_JUMP is
777*0Sstevel@tonic-gate 			 * actually an IXL1394_OP_LABEL or NULL
778*0Sstevel@tonic-gate 			 */
779*0Sstevel@tonic-gate 			if ((cur_jump_ixlp->label != NULL) &&
780*0Sstevel@tonic-gate 			    (cur_jump_ixlp->label->ixl_opcode !=
781*0Sstevel@tonic-gate 			    IXL1394_OP_LABEL)) {
782*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_jumplabel_error,
783*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
784*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
785*0Sstevel@tonic-gate 				    tnf_opaque, jumpixl_commandp, ixlcurp,
786*0Sstevel@tonic-gate 				    tnf_opaque, jumpto_ixl,
787*0Sstevel@tonic-gate 				    cur_jump_ixlp->label);
788*0Sstevel@tonic-gate 
789*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
790*0Sstevel@tonic-gate 				continue;
791*0Sstevel@tonic-gate 			}
792*0Sstevel@tonic-gate 			break;
793*0Sstevel@tonic-gate 		}
794*0Sstevel@tonic-gate 
795*0Sstevel@tonic-gate 		case IXL1394_OP_LABEL:
796*0Sstevel@tonic-gate 			/*
797*0Sstevel@tonic-gate 			 * save current ixl label command for xfer cmd
798*0Sstevel@tonic-gate 			 * finalize processing
799*0Sstevel@tonic-gate 			 */
800*0Sstevel@tonic-gate 			wvp->ixl_cur_labelp = ixlcurp;
801*0Sstevel@tonic-gate 
802*0Sstevel@tonic-gate 			/* set initiating label flag to cause cache flush */
803*0Sstevel@tonic-gate 			wvp->xfer_hci_flush |= INITIATING_LBL;
804*0Sstevel@tonic-gate 			break;
805*0Sstevel@tonic-gate 
806*0Sstevel@tonic-gate 		case IXL1394_OP_CALLBACK:
807*0Sstevel@tonic-gate 		case IXL1394_OP_CALLBACK_U:
808*0Sstevel@tonic-gate 		case IXL1394_OP_STORE_TIMESTAMP:
809*0Sstevel@tonic-gate 			/*
810*0Sstevel@tonic-gate 			 * these commands are accepted during compile,
811*0Sstevel@tonic-gate 			 * processed during execution (interrupt handling)
812*0Sstevel@tonic-gate 			 * No further processing is needed here.
813*0Sstevel@tonic-gate 			 */
814*0Sstevel@tonic-gate 			break;
815*0Sstevel@tonic-gate 
816*0Sstevel@tonic-gate 		case IXL1394_OP_SET_SKIPMODE:
817*0Sstevel@tonic-gate 		case IXL1394_OP_SET_SKIPMODE_U:
818*0Sstevel@tonic-gate 			/*
819*0Sstevel@tonic-gate 			 * Error if already have a set skipmode cmd for
820*0Sstevel@tonic-gate 			 * this xfer
821*0Sstevel@tonic-gate 			 */
822*0Sstevel@tonic-gate 			if (wvp->ixl_setskipmode_cmdp != NULL) {
823*0Sstevel@tonic-gate 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
824*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
825*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
826*0Sstevel@tonic-gate 				    " duplicate set skipmode", tnf_opaque,
827*0Sstevel@tonic-gate 				    ixl_commandp, ixlcurp);
828*0Sstevel@tonic-gate 
829*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
830*0Sstevel@tonic-gate 				continue;
831*0Sstevel@tonic-gate 			}
832*0Sstevel@tonic-gate 
833*0Sstevel@tonic-gate 			/* save skip mode ixl command and verify skipmode */
834*0Sstevel@tonic-gate 			wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
835*0Sstevel@tonic-gate 			    ixlcurp;
836*0Sstevel@tonic-gate 
837*0Sstevel@tonic-gate 			if ((wvp->ixl_setskipmode_cmdp->skipmode !=
838*0Sstevel@tonic-gate 				IXL1394_SKIP_TO_NEXT) &&
839*0Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
840*0Sstevel@tonic-gate 				IXL1394_SKIP_TO_SELF) &&
841*0Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
842*0Sstevel@tonic-gate 				IXL1394_SKIP_TO_STOP) &&
843*0Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
844*0Sstevel@tonic-gate 				IXL1394_SKIP_TO_LABEL)) {
845*0Sstevel@tonic-gate 
846*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_dup_set_error,
847*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
848*0Sstevel@tonic-gate 				    errmsg, "IXL EBAD_SKIPMODE", tnf_opaque,
849*0Sstevel@tonic-gate 				    ixl_commandp, ixlcurp, tnf_int, skip,
850*0Sstevel@tonic-gate 				    wvp->ixl_setskipmode_cmdp->skipmode);
851*0Sstevel@tonic-gate 
852*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
853*0Sstevel@tonic-gate 				continue;
854*0Sstevel@tonic-gate 			}
855*0Sstevel@tonic-gate 
856*0Sstevel@tonic-gate 			/*
857*0Sstevel@tonic-gate 			 * if mode is IXL1394_SKIP_TO_LABEL, verify label
858*0Sstevel@tonic-gate 			 * references an IXL1394_OP_LABEL
859*0Sstevel@tonic-gate 			 */
860*0Sstevel@tonic-gate 			if ((wvp->ixl_setskipmode_cmdp->skipmode ==
861*0Sstevel@tonic-gate 				IXL1394_SKIP_TO_LABEL) &&
862*0Sstevel@tonic-gate 			    ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
863*0Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
864*0Sstevel@tonic-gate 				IXL1394_OP_LABEL))) {
865*0Sstevel@tonic-gate 
866*0Sstevel@tonic-gate 				TNF_PROBE_3(hci1394_parse_ixl_jump_error,
867*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
868*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
869*0Sstevel@tonic-gate 				    tnf_opaque, jumpixl_commandp, ixlcurp,
870*0Sstevel@tonic-gate 				    tnf_opaque, jumpto_ixl,
871*0Sstevel@tonic-gate 				    wvp->ixl_setskipmode_cmdp->label);
872*0Sstevel@tonic-gate 
873*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
874*0Sstevel@tonic-gate 				continue;
875*0Sstevel@tonic-gate 			}
876*0Sstevel@tonic-gate 			/*
877*0Sstevel@tonic-gate 			 * set updateable set cmd cache flush eval flag if
878*0Sstevel@tonic-gate 			 * updateable opcode
879*0Sstevel@tonic-gate 			 */
880*0Sstevel@tonic-gate 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
881*0Sstevel@tonic-gate 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
882*0Sstevel@tonic-gate 			}
883*0Sstevel@tonic-gate 			break;
884*0Sstevel@tonic-gate 
885*0Sstevel@tonic-gate 		case IXL1394_OP_SET_TAGSYNC:
886*0Sstevel@tonic-gate 		case IXL1394_OP_SET_TAGSYNC_U:
887*0Sstevel@tonic-gate 			/*
888*0Sstevel@tonic-gate 			 * is an error if already have a set tag and sync cmd
889*0Sstevel@tonic-gate 			 * for this xfer
890*0Sstevel@tonic-gate 			 */
891*0Sstevel@tonic-gate 			if (wvp->ixl_settagsync_cmdp != NULL) {
892*0Sstevel@tonic-gate 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
893*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
894*0Sstevel@tonic-gate 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
895*0Sstevel@tonic-gate 				    " duplicate set tagsync", tnf_opaque,
896*0Sstevel@tonic-gate 				    ixl_commandp, ixlcurp);
897*0Sstevel@tonic-gate 
898*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
899*0Sstevel@tonic-gate 				continue;
900*0Sstevel@tonic-gate 			}
901*0Sstevel@tonic-gate 
902*0Sstevel@tonic-gate 			/* save ixl command containing tag and sync values */
903*0Sstevel@tonic-gate 			wvp->ixl_settagsync_cmdp =
904*0Sstevel@tonic-gate 			    (ixl1394_set_tagsync_t *)ixlcurp;
905*0Sstevel@tonic-gate 
906*0Sstevel@tonic-gate 			/*
907*0Sstevel@tonic-gate 			 * set updateable set cmd cache flush eval flag if
908*0Sstevel@tonic-gate 			 * updateable opcode
909*0Sstevel@tonic-gate 			 */
910*0Sstevel@tonic-gate 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
911*0Sstevel@tonic-gate 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
912*0Sstevel@tonic-gate 			}
913*0Sstevel@tonic-gate 			break;
914*0Sstevel@tonic-gate 
915*0Sstevel@tonic-gate 		case IXL1394_OP_SET_SYNCWAIT:
916*0Sstevel@tonic-gate 			/*
917*0Sstevel@tonic-gate 			 * count ixl wait-for-sync commands since last
918*0Sstevel@tonic-gate 			 * finalize ignore multiple occurrences for same xfer
919*0Sstevel@tonic-gate 			 * command
920*0Sstevel@tonic-gate 			 */
921*0Sstevel@tonic-gate 			wvp->ixl_setsyncwait_cnt++;
922*0Sstevel@tonic-gate 			break;
923*0Sstevel@tonic-gate 
924*0Sstevel@tonic-gate 		default:
925*0Sstevel@tonic-gate 			/* error - unknown/unimplemented ixl command */
926*0Sstevel@tonic-gate 			TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
927*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
928*0Sstevel@tonic-gate 			    "IXL1394_BAD_IXL_OPCODE", tnf_opaque, ixl_commandp,
929*0Sstevel@tonic-gate 			    ixlcurp, tnf_opaque, ixl_opcode, ixlopcode);
930*0Sstevel@tonic-gate 
931*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
932*0Sstevel@tonic-gate 			continue;
933*0Sstevel@tonic-gate 		}
934*0Sstevel@tonic-gate 	} /* while */
935*0Sstevel@tonic-gate 
936*0Sstevel@tonic-gate 	/* finalize any last descriptor block build */
937*0Sstevel@tonic-gate 	wvp->ixl_cur_cmdp = NULL;
938*0Sstevel@tonic-gate 	if (wvp->dma_bld_error == 0) {
939*0Sstevel@tonic-gate 		hci1394_finalize_cur_xfer_desc(wvp);
940*0Sstevel@tonic-gate 	}
941*0Sstevel@tonic-gate 
942*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_exit,
943*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
944*0Sstevel@tonic-gate }
945*0Sstevel@tonic-gate 
946*0Sstevel@tonic-gate /*
947*0Sstevel@tonic-gate  * hci1394_finalize_all_xfer_desc()
948*0Sstevel@tonic-gate  *    Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
949*0Sstevel@tonic-gate  *
950*0Sstevel@tonic-gate  *    Set interrupt enable on first descriptor block associated with current
951*0Sstevel@tonic-gate  *    xfer IXL command if the current IXL xfer was introduced by an IXL label
951*0Sstevel@tonic-gate  *    command.
952*0Sstevel@tonic-gate  *
953*0Sstevel@tonic-gate  *    Set interrupt enable on last descriptor block associated with current xfer
954*0Sstevel@tonic-gate  *    IXL command if any callback ixl commands are found on the execution path
955*0Sstevel@tonic-gate  *    between the current and the next xfer ixl command.  (Previously, this
956*0Sstevel@tonic-gate  *    applied to store timestamp ixl commands, as well.)
957*0Sstevel@tonic-gate  */
958*0Sstevel@tonic-gate static void
hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t * wvp)959*0Sstevel@tonic-gate hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
960*0Sstevel@tonic-gate {
961*0Sstevel@tonic-gate 	ixl1394_command_t *ixlcurp;		/* current ixl command */
962*0Sstevel@tonic-gate 	ixl1394_command_t *ixlnextp;		/* next ixl command */
963*0Sstevel@tonic-gate 	ixl1394_command_t *ixlexecnext;
964*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xferctl_curp;
965*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xferctl_nxtp;
966*0Sstevel@tonic-gate 	hci1394_desc_t		*hcidescp;
967*0Sstevel@tonic-gate 	ddi_acc_handle_t	acc_hdl;
968*0Sstevel@tonic-gate 	uint32_t	temp;
969*0Sstevel@tonic-gate 	uint32_t	dma_execnext_addr;
970*0Sstevel@tonic-gate 	uint32_t	dma_skiplabel_addr;
971*0Sstevel@tonic-gate 	uint32_t	dma_skip_addr;
972*0Sstevel@tonic-gate 	uint32_t	callback_cnt;
973*0Sstevel@tonic-gate 	uint16_t	repcnt;
974*0Sstevel@tonic-gate 	uint16_t	ixlopcode;
975*0Sstevel@tonic-gate 	int		ii;
976*0Sstevel@tonic-gate 	int		err;
977*0Sstevel@tonic-gate 
978*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_enter,
979*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
980*0Sstevel@tonic-gate 
981*0Sstevel@tonic-gate 	/*
982*0Sstevel@tonic-gate 	 * If xmit mode and if default skipmode is skip to label -
983*0Sstevel@tonic-gate 	 * follow exec path starting at default skipmode label until
984*0Sstevel@tonic-gate 	 * find the first ixl xfer command which is to be executed.
985*0Sstevel@tonic-gate 	 * Set its address into default_skipxferp.
986*0Sstevel@tonic-gate 	 */
987*0Sstevel@tonic-gate 	if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
988*0Sstevel@tonic-gate 	    (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
989*0Sstevel@tonic-gate 
990*0Sstevel@tonic-gate 		err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
991*0Sstevel@tonic-gate 		    NULL, &wvp->default_skipxferp);
992*0Sstevel@tonic-gate 		if (err == DDI_FAILURE) {
993*0Sstevel@tonic-gate 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
994*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
995*0Sstevel@tonic-gate 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
996*0Sstevel@tonic-gate 			    "for skiplabel default w/no xfers", tnf_opaque,
997*0Sstevel@tonic-gate 			    skipixl_cmdp, wvp->default_skiplabelp);
998*0Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
999*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1000*0Sstevel@tonic-gate 
1001*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1002*0Sstevel@tonic-gate 			return;
1003*0Sstevel@tonic-gate 		}
1004*0Sstevel@tonic-gate 	}
1005*0Sstevel@tonic-gate 
1006*0Sstevel@tonic-gate 	/* set first ixl cmd */
1007*0Sstevel@tonic-gate 	ixlnextp = wvp->ctxtp->ixl_firstp;
1008*0Sstevel@tonic-gate 
1009*0Sstevel@tonic-gate 	/* follow ixl links until reach end or find error */
1010*0Sstevel@tonic-gate 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
1011*0Sstevel@tonic-gate 
1012*0Sstevel@tonic-gate 		/* set this command as the current ixl command */
1013*0Sstevel@tonic-gate 		ixlcurp = ixlnextp;
1014*0Sstevel@tonic-gate 		ixlnextp = ixlcurp->next_ixlp;
1015*0Sstevel@tonic-gate 
1016*0Sstevel@tonic-gate 		/* get command opcode removing unneeded update flag */
1017*0Sstevel@tonic-gate 		ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
1018*0Sstevel@tonic-gate 
1019*0Sstevel@tonic-gate 		/*
1020*0Sstevel@tonic-gate 		 * Scan for next ixl xfer start command (including this one),
1021*0Sstevel@tonic-gate 		 * along ixl link path. Once xfer command found, find next IXL
1022*0Sstevel@tonic-gate 		 * xfer cmd along execution path and fill in branch address of
1023*0Sstevel@tonic-gate 		 * current xfer command. If is composite ixl xfer command, first
1024*0Sstevel@tonic-gate 		 * link forward branch dma addresses of each descriptor block in
1025*0Sstevel@tonic-gate 		 * composite, until reach final one then set its branch address
1026*0Sstevel@tonic-gate 		 * to next execution path xfer found.  Next determine skip mode
1027*0Sstevel@tonic-gate 		 * and fill in skip address(es) appropriately.
1028*0Sstevel@tonic-gate 		 */
1029*0Sstevel@tonic-gate 		/* skip to next if not xfer start ixl command */
1030*0Sstevel@tonic-gate 		if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
1031*0Sstevel@tonic-gate 		    ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
1032*0Sstevel@tonic-gate 			continue;
1033*0Sstevel@tonic-gate 		}
1034*0Sstevel@tonic-gate 
1035*0Sstevel@tonic-gate 		/*
1036*0Sstevel@tonic-gate 		 * get xfer_ctl structure and composite repeat count for current
1037*0Sstevel@tonic-gate 		 * IXL xfer cmd
1038*0Sstevel@tonic-gate 		 */
1039*0Sstevel@tonic-gate 		xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
1040*0Sstevel@tonic-gate 		repcnt = xferctl_curp->cnt;
1041*0Sstevel@tonic-gate 
1042*0Sstevel@tonic-gate 		/*
1043*0Sstevel@tonic-gate 		 * if initiated by an IXL label command, set interrupt enable
1044*0Sstevel@tonic-gate 		 * flag into last component of first descriptor block of
1045*0Sstevel@tonic-gate 		 * current IXL xfer cmd
1046*0Sstevel@tonic-gate 		 */
1047*0Sstevel@tonic-gate 		if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
1048*0Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
1049*0Sstevel@tonic-gate 			    xferctl_curp->dma[0].dma_descp;
1050*0Sstevel@tonic-gate 			acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
1051*0Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1052*0Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
1053*0Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1054*0Sstevel@tonic-gate 		}
1055*0Sstevel@tonic-gate 
1056*0Sstevel@tonic-gate 		/* find next xfer IXL cmd by following execution path */
1057*0Sstevel@tonic-gate 		err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
1058*0Sstevel@tonic-gate 		    &callback_cnt, &ixlexecnext);
1059*0Sstevel@tonic-gate 
1060*0Sstevel@tonic-gate 		/* if label<->jump loop detected, return error */
1061*0Sstevel@tonic-gate 		if (err == DDI_FAILURE) {
1062*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1063*0Sstevel@tonic-gate 
1064*0Sstevel@tonic-gate 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
1065*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1066*0Sstevel@tonic-gate 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
1067*0Sstevel@tonic-gate 			    "w/no xfers", tnf_opaque, ixl_cmdp,
1068*0Sstevel@tonic-gate 			    ixlcurp->next_ixlp);
1069*0Sstevel@tonic-gate 			continue;
1070*0Sstevel@tonic-gate 		}
1071*0Sstevel@tonic-gate 
1072*0Sstevel@tonic-gate 		/* link current IXL's xfer_ctl to next xfer IXL on exec path */
1073*0Sstevel@tonic-gate 		xferctl_curp->execp = ixlexecnext;
1074*0Sstevel@tonic-gate 
1075*0Sstevel@tonic-gate 		/*
1076*0Sstevel@tonic-gate 		 * if callbacks have been seen during execution path scan,
1077*0Sstevel@tonic-gate 		 * set interrupt enable flag into last descriptor of last
1078*0Sstevel@tonic-gate 		 * descriptor block of current IXL xfer cmd
1079*0Sstevel@tonic-gate 		 */
1080*0Sstevel@tonic-gate 		if (callback_cnt != 0) {
1081*0Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
1082*0Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1083*0Sstevel@tonic-gate 			acc_hdl =
1084*0Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1085*0Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1086*0Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
1087*0Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1088*0Sstevel@tonic-gate 		}
1089*0Sstevel@tonic-gate 
1090*0Sstevel@tonic-gate 		/*
1091*0Sstevel@tonic-gate 		 * obtain dma bound addr of next exec path IXL xfer command,
1092*0Sstevel@tonic-gate 		 * if any
1093*0Sstevel@tonic-gate 		 */
1094*0Sstevel@tonic-gate 		dma_execnext_addr = 0;
1095*0Sstevel@tonic-gate 
1096*0Sstevel@tonic-gate 		if (ixlexecnext != NULL) {
1097*0Sstevel@tonic-gate 			xferctl_nxtp = (hci1394_xfer_ctl_t *)
1098*0Sstevel@tonic-gate 			    ixlexecnext->compiler_privatep;
1099*0Sstevel@tonic-gate 			dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
1100*0Sstevel@tonic-gate 		} else {
1101*0Sstevel@tonic-gate 			/*
1102*0Sstevel@tonic-gate 			 * If this is last descriptor (next == NULL), then
1103*0Sstevel@tonic-gate 			 * make sure the interrupt bit is enabled.  This
1104*0Sstevel@tonic-gate 			 * way we can ensure that we are notified when the
1105*0Sstevel@tonic-gate 			 * descriptor chain processing has come to an end.
1106*0Sstevel@tonic-gate 			 */
1107*0Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
1108*0Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1109*0Sstevel@tonic-gate 			acc_hdl =
1110*0Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1111*0Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1112*0Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
1113*0Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1114*0Sstevel@tonic-gate 		}
1115*0Sstevel@tonic-gate 
1116*0Sstevel@tonic-gate 		/*
1117*0Sstevel@tonic-gate 		 * set jump address of final cur IXL xfer cmd to addr next
1118*0Sstevel@tonic-gate 		 * IXL xfer cmd
1119*0Sstevel@tonic-gate 		 */
1120*0Sstevel@tonic-gate 		hcidescp = (hci1394_desc_t *)
1121*0Sstevel@tonic-gate 		    xferctl_curp->dma[repcnt - 1].dma_descp;
1122*0Sstevel@tonic-gate 		acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1123*0Sstevel@tonic-gate 		ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
1124*0Sstevel@tonic-gate 
1125*0Sstevel@tonic-gate 		/*
1126*0Sstevel@tonic-gate 		 * if a composite object, forward link initial jump
1127*0Sstevel@tonic-gate 		 * dma addresses
1128*0Sstevel@tonic-gate 		 */
1129*0Sstevel@tonic-gate 		for (ii = 0; ii < repcnt - 1; ii++) {
1130*0Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
1131*0Sstevel@tonic-gate 			    xferctl_curp->dma[ii].dma_descp;
1132*0Sstevel@tonic-gate 			acc_hdl	 = xferctl_curp->dma[ii].dma_buf->bi_handle;
1133*0Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->branch,
1134*0Sstevel@tonic-gate 			    xferctl_curp->dma[ii + 1].dma_bound);
1135*0Sstevel@tonic-gate 		}
1136*0Sstevel@tonic-gate 
1137*0Sstevel@tonic-gate 		/*
1138*0Sstevel@tonic-gate 		 * fill in skip address(es) for all descriptor blocks belonging
1139*0Sstevel@tonic-gate 		 * to current IXL xfer command; note:skip addresses apply only
1140*0Sstevel@tonic-gate 		 * to xmit mode commands
1141*0Sstevel@tonic-gate 		 */
1142*0Sstevel@tonic-gate 		if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
1143*0Sstevel@tonic-gate 
1144*0Sstevel@tonic-gate 			/* first obtain and set skip mode information */
1145*0Sstevel@tonic-gate 			wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
1146*0Sstevel@tonic-gate 			hci1394_set_xmit_skip_mode(wvp);
1147*0Sstevel@tonic-gate 
1148*0Sstevel@tonic-gate 			/*
1149*0Sstevel@tonic-gate 			 * if skip to label,init dma bound addr to be
1150*0Sstevel@tonic-gate 			 * 1st xfer cmd after label
1151*0Sstevel@tonic-gate 			 */
1152*0Sstevel@tonic-gate 			dma_skiplabel_addr = 0;
1153*0Sstevel@tonic-gate 			if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
1154*0Sstevel@tonic-gate 			    (wvp->skipxferp != NULL)) {
1155*0Sstevel@tonic-gate 				xferctl_nxtp = (hci1394_xfer_ctl_t *)
1156*0Sstevel@tonic-gate 				    wvp->skipxferp->compiler_privatep;
1157*0Sstevel@tonic-gate 				dma_skiplabel_addr =
1158*0Sstevel@tonic-gate 				    xferctl_nxtp->dma[0].dma_bound;
1159*0Sstevel@tonic-gate 			}
1160*0Sstevel@tonic-gate 
1161*0Sstevel@tonic-gate 			/*
1162*0Sstevel@tonic-gate 			 * set skip addrs for each descriptor blk at this
1163*0Sstevel@tonic-gate 			 * xfer start IXL cmd
1164*0Sstevel@tonic-gate 			 */
1165*0Sstevel@tonic-gate 			for (ii = 0; ii < repcnt; ii++) {
1166*0Sstevel@tonic-gate 				switch (wvp->skipmode) {
1167*0Sstevel@tonic-gate 
1168*0Sstevel@tonic-gate 				case IXL1394_SKIP_TO_LABEL:
1169*0Sstevel@tonic-gate 					/* set dma bound address - label */
1170*0Sstevel@tonic-gate 					dma_skip_addr = dma_skiplabel_addr;
1171*0Sstevel@tonic-gate 					break;
1172*0Sstevel@tonic-gate 
1173*0Sstevel@tonic-gate 				case IXL1394_SKIP_TO_NEXT:
1174*0Sstevel@tonic-gate 					/* set dma bound address - next */
1175*0Sstevel@tonic-gate 					if (ii < repcnt - 1) {
1176*0Sstevel@tonic-gate 						dma_skip_addr = xferctl_curp->
1177*0Sstevel@tonic-gate 						    dma[ii + 1].dma_bound;
1178*0Sstevel@tonic-gate 					} else {
1179*0Sstevel@tonic-gate 						dma_skip_addr =
1180*0Sstevel@tonic-gate 						    dma_execnext_addr;
1181*0Sstevel@tonic-gate 					}
1182*0Sstevel@tonic-gate 					break;
1183*0Sstevel@tonic-gate 
1184*0Sstevel@tonic-gate 				case IXL1394_SKIP_TO_SELF:
1185*0Sstevel@tonic-gate 					/* set dma bound address - self */
1186*0Sstevel@tonic-gate 					dma_skip_addr =
1187*0Sstevel@tonic-gate 					    xferctl_curp->dma[ii].dma_bound;
1188*0Sstevel@tonic-gate 					break;
1189*0Sstevel@tonic-gate 
1190*0Sstevel@tonic-gate 				case IXL1394_SKIP_TO_STOP:
1191*0Sstevel@tonic-gate 				default:
1192*0Sstevel@tonic-gate 					/* set dma bound address - stop */
1193*0Sstevel@tonic-gate 					dma_skip_addr = 0;
1194*0Sstevel@tonic-gate 					break;
1195*0Sstevel@tonic-gate 				}
1196*0Sstevel@tonic-gate 
1197*0Sstevel@tonic-gate 				/*
1198*0Sstevel@tonic-gate 				 * determine address of first descriptor of
1199*0Sstevel@tonic-gate 				 * current descriptor block by adjusting addr of
1200*0Sstevel@tonic-gate 				 * last descriptor of current descriptor block
1201*0Sstevel@tonic-gate 				 */
1202*0Sstevel@tonic-gate 				hcidescp = ((hci1394_desc_t *)
1203*0Sstevel@tonic-gate 				    xferctl_curp->dma[ii].dma_descp);
1204*0Sstevel@tonic-gate 				acc_hdl =
1205*0Sstevel@tonic-gate 				    xferctl_curp->dma[ii].dma_buf->bi_handle;
1206*0Sstevel@tonic-gate 
1207*0Sstevel@tonic-gate 				/*
1208*0Sstevel@tonic-gate 				 * adjust by count of descriptors in this desc
1209*0Sstevel@tonic-gate 				 * block not including the last one (size of
1210*0Sstevel@tonic-gate 				 * descriptor)
1211*0Sstevel@tonic-gate 				 */
1212*0Sstevel@tonic-gate 				hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
1213*0Sstevel@tonic-gate 				    DESC_Z_MASK) - 1);
1214*0Sstevel@tonic-gate 
1215*0Sstevel@tonic-gate 				/*
1216*0Sstevel@tonic-gate 				 * adjust further if the last descriptor is
1217*0Sstevel@tonic-gate 				 * double sized
1218*0Sstevel@tonic-gate 				 */
1219*0Sstevel@tonic-gate 				if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
1220*0Sstevel@tonic-gate 					hcidescp++;
1221*0Sstevel@tonic-gate 				}
1222*0Sstevel@tonic-gate 				/*
1223*0Sstevel@tonic-gate 				 * now set skip address into first descriptor
1224*0Sstevel@tonic-gate 				 * of descriptor block
1225*0Sstevel@tonic-gate 				 */
1226*0Sstevel@tonic-gate 				ddi_put32(acc_hdl, &hcidescp->branch,
1227*0Sstevel@tonic-gate 				    dma_skip_addr);
1228*0Sstevel@tonic-gate 			} /* for */
1229*0Sstevel@tonic-gate 		} /* if */
1230*0Sstevel@tonic-gate 	} /* while */
1231*0Sstevel@tonic-gate 
1232*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
1233*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1234*0Sstevel@tonic-gate }
1235*0Sstevel@tonic-gate 
1236*0Sstevel@tonic-gate /*
1237*0Sstevel@tonic-gate  * hci1394_finalize_cur_xfer_desc()
1238*0Sstevel@tonic-gate  *    Build the openHCI descriptor for a packet or buffer based on info
1239*0Sstevel@tonic-gate  *    currently collected into the working vars struct (wvp).  After some
1240*0Sstevel@tonic-gate  *    checks, this routine dispatches to the appropriate descriptor block
1241*0Sstevel@tonic-gate  *    build (bld) routine for the packet or buf type.
1242*0Sstevel@tonic-gate  */
1243*0Sstevel@tonic-gate static void
hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t * wvp)1244*0Sstevel@tonic-gate hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
1245*0Sstevel@tonic-gate {
1246*0Sstevel@tonic-gate 	uint16_t ixlopcode;
1247*0Sstevel@tonic-gate 	uint16_t ixlopraw;
1248*0Sstevel@tonic-gate 
1249*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_enter,
1250*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1251*0Sstevel@tonic-gate 
1252*0Sstevel@tonic-gate 	/* extract opcode from current IXL cmd (if any) */
1253*0Sstevel@tonic-gate 	if (wvp->ixl_cur_cmdp != NULL) {
1254*0Sstevel@tonic-gate 		ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
1255*0Sstevel@tonic-gate 		ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
1256*0Sstevel@tonic-gate 	} else {
1257*0Sstevel@tonic-gate 		ixlopcode = ixlopraw = IXL1394_OP_INVALID;
1258*0Sstevel@tonic-gate 	}
1259*0Sstevel@tonic-gate 
1260*0Sstevel@tonic-gate 	/*
1261*0Sstevel@tonic-gate 	 * if no xfer descriptor block being built, perform validity checks
1262*0Sstevel@tonic-gate 	 */
1263*0Sstevel@tonic-gate 	if (wvp->xfer_state == XFER_NONE) {
1264*0Sstevel@tonic-gate 		/*
1265*0Sstevel@tonic-gate 		 * error if being finalized by IXL1394_OP_LABEL or
1266*0Sstevel@tonic-gate 		 * IXL1394_OP_JUMP or if at end, and have an unapplied
1267*0Sstevel@tonic-gate 		 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
1268*0Sstevel@tonic-gate 		 * IXL1394_OP_SET_SYNCWAIT
1269*0Sstevel@tonic-gate 		 */
1270*0Sstevel@tonic-gate 		if ((ixlopraw == IXL1394_OP_JUMP) ||
1271*0Sstevel@tonic-gate 		    (ixlopraw == IXL1394_OP_LABEL) ||
1272*0Sstevel@tonic-gate 		    (wvp->ixl_cur_cmdp == NULL) ||
1273*0Sstevel@tonic-gate 		    (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
1274*0Sstevel@tonic-gate 			if ((wvp->ixl_settagsync_cmdp != NULL) ||
1275*0Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp != NULL) ||
1276*0Sstevel@tonic-gate 			    (wvp->ixl_setsyncwait_cnt != 0)) {
1277*0Sstevel@tonic-gate 
1278*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
1279*0Sstevel@tonic-gate 
1280*0Sstevel@tonic-gate 				TNF_PROBE_2(
1281*0Sstevel@tonic-gate 				    hci1394_finalize_cur_xfer_desc_set_error,
1282*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
1283*0Sstevel@tonic-gate 				    errmsg, "IXL1394_UNAPPLIED_SET_CMD: "
1284*0Sstevel@tonic-gate 				    "orphaned set (no associated packet)",
1285*0Sstevel@tonic-gate 				    tnf_opaque, ixl_commandp,
1286*0Sstevel@tonic-gate 				    wvp->ixl_cur_cmdp);
1287*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
1288*0Sstevel@tonic-gate 				    hci1394_finalize_cur_xfer_desc_exit,
1289*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1290*0Sstevel@tonic-gate 				return;
1291*0Sstevel@tonic-gate 			}
1292*0Sstevel@tonic-gate 		}
1293*0Sstevel@tonic-gate 
1294*0Sstevel@tonic-gate 		/* error if finalize is due to updateable jump cmd */
1295*0Sstevel@tonic-gate 		if (ixlopcode == IXL1394_OP_JUMP_U) {
1296*0Sstevel@tonic-gate 
1297*0Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
1298*0Sstevel@tonic-gate 
1299*0Sstevel@tonic-gate 			TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_upd_error,
1300*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1301*0Sstevel@tonic-gate 			    "IXL1394_EUPDATE_DISALLOWED: jumpU w/out pkt",
1302*0Sstevel@tonic-gate 			    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
1303*0Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1304*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1305*0Sstevel@tonic-gate 			return;
1306*0Sstevel@tonic-gate 		}
1307*0Sstevel@tonic-gate 
1308*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1309*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1310*0Sstevel@tonic-gate 
1311*0Sstevel@tonic-gate 		/* no error, no xfer */
1312*0Sstevel@tonic-gate 		return;
1313*0Sstevel@tonic-gate 	}
1314*0Sstevel@tonic-gate 
1315*0Sstevel@tonic-gate 	/*
1316*0Sstevel@tonic-gate 	 * finalize current xfer descriptor block being built
1317*0Sstevel@tonic-gate 	 */
1318*0Sstevel@tonic-gate 
1319*0Sstevel@tonic-gate 	/* count IXL xfer start command for descriptor block being built */
1320*0Sstevel@tonic-gate 	wvp->ixl_xfer_st_cnt++;
1321*0Sstevel@tonic-gate 
1322*0Sstevel@tonic-gate 	/*
1323*0Sstevel@tonic-gate 	 * complete setting of cache flush evaluation flags; flags will already
1324*0Sstevel@tonic-gate 	 * have been set by updateable set cmds and non-start xfer pkt cmds
1325*0Sstevel@tonic-gate 	 */
1326*0Sstevel@tonic-gate 	/* now set cache flush flag if current xfer start cmnd is updateable */
1327*0Sstevel@tonic-gate 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
1328*0Sstevel@tonic-gate 		wvp->xfer_hci_flush |= UPDATEABLE_XFER;
1329*0Sstevel@tonic-gate 	}
1330*0Sstevel@tonic-gate 	/*
1331*0Sstevel@tonic-gate 	 * also set cache flush flag if xfer being finalized by
1332*0Sstevel@tonic-gate 	 * updateable jump cmd
1333*0Sstevel@tonic-gate 	 */
1334*0Sstevel@tonic-gate 	if ((ixlopcode == IXL1394_OP_JUMP_U) != 0) {
1335*0Sstevel@tonic-gate 		wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
1336*0Sstevel@tonic-gate 	}
1337*0Sstevel@tonic-gate 
1338*0Sstevel@tonic-gate 	/*
1339*0Sstevel@tonic-gate 	 * Determine if cache flush required before building next descriptor
1340*0Sstevel@tonic-gate 	 * block. If xfer pkt command and any cache flush flags are set,
1341*0Sstevel@tonic-gate 	 * hci flush needed.
1342*0Sstevel@tonic-gate 	 * If buffer or special xfer command and xfer command is updateable or
1343*0Sstevel@tonic-gate 	 * an associated set command is updateable, hci flush is required now.
1344*0Sstevel@tonic-gate 	 * If a single-xfer buffer or special xfer command is finalized by
1345*0Sstevel@tonic-gate 	 * updateable jump command, hci flush is required now.
1346*0Sstevel@tonic-gate 	 * Note: a cache flush will be required later, before the last
1347*0Sstevel@tonic-gate 	 * descriptor block of a multi-xfer set of descriptor blocks is built,
1348*0Sstevel@tonic-gate 	 * if this (non-pkt) xfer is finalized by an updateable jump command.
1349*0Sstevel@tonic-gate 	 */
1350*0Sstevel@tonic-gate 	if (wvp->xfer_hci_flush != 0) {
1351*0Sstevel@tonic-gate 		if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
1352*0Sstevel@tonic-gate 		    IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1353*0Sstevel@tonic-gate 			(UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1354*0Sstevel@tonic-gate 			0)) {
1355*0Sstevel@tonic-gate 
1356*0Sstevel@tonic-gate 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
1357*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
1358*0Sstevel@tonic-gate 				    hci1394_finalize_cur_xfer_desc_exit,
1359*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1360*0Sstevel@tonic-gate 
1361*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
1362*0Sstevel@tonic-gate 				return;
1363*0Sstevel@tonic-gate 			}
1364*0Sstevel@tonic-gate 		}
1365*0Sstevel@tonic-gate 	}
1366*0Sstevel@tonic-gate 
1367*0Sstevel@tonic-gate 	/*
1368*0Sstevel@tonic-gate 	 * determine which kind of descriptor block to build based on
1369*0Sstevel@tonic-gate 	 * xfer state - hdr only, skip cycle, pkt or buf.
1370*0Sstevel@tonic-gate 	 */
1371*0Sstevel@tonic-gate 	switch (wvp->xfer_state) {
1372*0Sstevel@tonic-gate 
1373*0Sstevel@tonic-gate 	case XFER_PKT:
1374*0Sstevel@tonic-gate 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1375*0Sstevel@tonic-gate 			hci1394_bld_recv_pkt_desc(wvp);
1376*0Sstevel@tonic-gate 		} else {
1377*0Sstevel@tonic-gate 			hci1394_bld_xmit_pkt_desc(wvp);
1378*0Sstevel@tonic-gate 		}
1379*0Sstevel@tonic-gate 		break;
1380*0Sstevel@tonic-gate 
1381*0Sstevel@tonic-gate 	case XFER_BUF:
1382*0Sstevel@tonic-gate 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1383*0Sstevel@tonic-gate 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
1384*0Sstevel@tonic-gate 				hci1394_bld_recv_buf_fill_desc(wvp);
1385*0Sstevel@tonic-gate 			} else {
1386*0Sstevel@tonic-gate 				hci1394_bld_recv_buf_ppb_desc(wvp);
1387*0Sstevel@tonic-gate 			}
1388*0Sstevel@tonic-gate 		} else {
1389*0Sstevel@tonic-gate 			hci1394_bld_xmit_buf_desc(wvp);
1390*0Sstevel@tonic-gate 		}
1391*0Sstevel@tonic-gate 		break;
1392*0Sstevel@tonic-gate 
1393*0Sstevel@tonic-gate 	case XMIT_HDRONLY:
1394*0Sstevel@tonic-gate 	case XMIT_NOPKT:
1395*0Sstevel@tonic-gate 		hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
1396*0Sstevel@tonic-gate 		break;
1397*0Sstevel@tonic-gate 
1398*0Sstevel@tonic-gate 	default:
1399*0Sstevel@tonic-gate 		/* internal compiler error */
1400*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_internal_error,
1401*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1402*0Sstevel@tonic-gate 		    "IXL1394_INTERNAL_ERROR: invalid state", tnf_opaque,
1403*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_cmdp);
1404*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1405*0Sstevel@tonic-gate 	}
1406*0Sstevel@tonic-gate 
1407*0Sstevel@tonic-gate 	/* return if error */
1408*0Sstevel@tonic-gate 	if (wvp->dma_bld_error != 0) {
1409*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1410*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1411*0Sstevel@tonic-gate 
1412*0Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
1413*0Sstevel@tonic-gate 		return;
1414*0Sstevel@tonic-gate 	}
1415*0Sstevel@tonic-gate 
1416*0Sstevel@tonic-gate 	/*
1417*0Sstevel@tonic-gate 	 * if was finalizing IXL jump cmd, set compiler_privatep to
1418*0Sstevel@tonic-gate 	 * cur xfer IXL cmd
1419*0Sstevel@tonic-gate 	 */
1420*0Sstevel@tonic-gate 	if (ixlopraw == IXL1394_OP_JUMP) {
1421*0Sstevel@tonic-gate 		wvp->ixl_cur_cmdp->compiler_privatep =
1422*0Sstevel@tonic-gate 		    (void *)wvp->ixl_cur_xfer_stp;
1423*0Sstevel@tonic-gate 	}
1424*0Sstevel@tonic-gate 
1425*0Sstevel@tonic-gate 	/* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
1426*0Sstevel@tonic-gate 	if (wvp->ixl_cur_labelp != NULL) {
1427*0Sstevel@tonic-gate 		((hci1394_xfer_ctl_t *)
1428*0Sstevel@tonic-gate 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
1429*0Sstevel@tonic-gate 		    XCTL_LABELLED;
1430*0Sstevel@tonic-gate 		wvp->ixl_cur_labelp = NULL;
1431*0Sstevel@tonic-gate 	}
1432*0Sstevel@tonic-gate 
1433*0Sstevel@tonic-gate 	/*
1434*0Sstevel@tonic-gate 	 * set any associated IXL set skipmode cmd into xfer_ctl of
1435*0Sstevel@tonic-gate 	 * cur xfer IXL cmd
1436*0Sstevel@tonic-gate 	 */
1437*0Sstevel@tonic-gate 	if (wvp->ixl_setskipmode_cmdp != NULL) {
1438*0Sstevel@tonic-gate 		((hci1394_xfer_ctl_t *)
1439*0Sstevel@tonic-gate 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
1440*0Sstevel@tonic-gate 		    wvp->ixl_setskipmode_cmdp;
1441*0Sstevel@tonic-gate 	}
1442*0Sstevel@tonic-gate 
1443*0Sstevel@tonic-gate 	/* set no current xfer start cmd */
1444*0Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp = NULL;
1445*0Sstevel@tonic-gate 
1446*0Sstevel@tonic-gate 	/* set no current set tag&sync, set skipmode or set syncwait commands */
1447*0Sstevel@tonic-gate 	wvp->ixl_settagsync_cmdp = NULL;
1448*0Sstevel@tonic-gate 	wvp->ixl_setskipmode_cmdp = NULL;
1449*0Sstevel@tonic-gate 	wvp->ixl_setsyncwait_cnt = 0;
1450*0Sstevel@tonic-gate 
1451*0Sstevel@tonic-gate 	/* set no currently active descriptor blocks */
1452*0Sstevel@tonic-gate 	wvp->descriptors = 0;
1453*0Sstevel@tonic-gate 
1454*0Sstevel@tonic-gate 	/* reset total packet length and buffers count */
1455*0Sstevel@tonic-gate 	wvp->xfer_pktlen = 0;
1456*0Sstevel@tonic-gate 	wvp->xfer_bufcnt = 0;
1457*0Sstevel@tonic-gate 
1458*0Sstevel@tonic-gate 	/* reset flush cache evaluation flags */
1459*0Sstevel@tonic-gate 	wvp->xfer_hci_flush = 0;
1460*0Sstevel@tonic-gate 
1461*0Sstevel@tonic-gate 	/* set no xmit descriptor block being built */
1462*0Sstevel@tonic-gate 	wvp->xfer_state = XFER_NONE;
1463*0Sstevel@tonic-gate 
1464*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1465*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1466*0Sstevel@tonic-gate }
1467*0Sstevel@tonic-gate 
1468*0Sstevel@tonic-gate /*
1469*0Sstevel@tonic-gate  * hci1394_bld_recv_pkt_desc()
1470*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive packet.
1471*0Sstevel@tonic-gate  */
1472*0Sstevel@tonic-gate static void
hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t * wvp)1473*0Sstevel@tonic-gate hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1474*0Sstevel@tonic-gate {
1475*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
1476*0Sstevel@tonic-gate 	caddr_t			dma_descp;
1477*0Sstevel@tonic-gate 	uint32_t		dma_desc_bound;
1478*0Sstevel@tonic-gate 	uint32_t		wait_for_sync;
1479*0Sstevel@tonic-gate 	uint32_t		ii;
1480*0Sstevel@tonic-gate 	hci1394_desc_t		*wv_descp;	/* shorthand to local descrpt */
1481*0Sstevel@tonic-gate 
1482*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_enter,
1483*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1484*0Sstevel@tonic-gate 
1485*0Sstevel@tonic-gate 	/*
1486*0Sstevel@tonic-gate 	 * is error if number of descriptors to be built exceeds maximum
1487*0Sstevel@tonic-gate 	 * descriptors allowed in a descriptor block.
1488*0Sstevel@tonic-gate 	 */
1489*0Sstevel@tonic-gate 	if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1490*0Sstevel@tonic-gate 
1491*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1492*0Sstevel@tonic-gate 
1493*0Sstevel@tonic-gate 		TNF_PROBE_3(hci1394_bld_recv_pkt_desc_fragment_oflo_error,
1494*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1495*0Sstevel@tonic-gate 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1496*0Sstevel@tonic-gate 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1497*0Sstevel@tonic-gate 		    wvp->descriptors + wvp->xfer_bufcnt);
1498*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1499*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1500*0Sstevel@tonic-gate 		return;
1501*0Sstevel@tonic-gate 	}
1502*0Sstevel@tonic-gate 
1503*0Sstevel@tonic-gate 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1504*0Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1505*0Sstevel@tonic-gate 
1506*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1507*0Sstevel@tonic-gate 
1508*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_recv_pkt_desc_mem_alloc_fail,
1509*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1510*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1511*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1512*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1513*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1514*0Sstevel@tonic-gate 		return;
1515*0Sstevel@tonic-gate 	}
1516*0Sstevel@tonic-gate 
1517*0Sstevel@tonic-gate 	/*
1518*0Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
1519*0Sstevel@tonic-gate 	 * current IXL xfer cmd
1520*0Sstevel@tonic-gate 	 */
1521*0Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1522*0Sstevel@tonic-gate 
1523*0Sstevel@tonic-gate 	/*
1524*0Sstevel@tonic-gate 	 * if enabled, set wait for sync flag in first descriptor of
1525*0Sstevel@tonic-gate 	 * descriptor block
1526*0Sstevel@tonic-gate 	 */
1527*0Sstevel@tonic-gate 	if (wvp->ixl_setsyncwait_cnt > 0) {
1528*0Sstevel@tonic-gate 		wvp->ixl_setsyncwait_cnt = 1;
1529*0Sstevel@tonic-gate 		wait_for_sync = DESC_W_ENBL;
1530*0Sstevel@tonic-gate 	} else {
1531*0Sstevel@tonic-gate 		wait_for_sync = DESC_W_DSABL;
1532*0Sstevel@tonic-gate 	}
1533*0Sstevel@tonic-gate 
1534*0Sstevel@tonic-gate 	/* create descriptor block for this recv packet (xfer status enabled) */
1535*0Sstevel@tonic-gate 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1536*0Sstevel@tonic-gate 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1537*0Sstevel@tonic-gate 
1538*0Sstevel@tonic-gate 		if (ii == (wvp->xfer_bufcnt - 1)) {
1539*0Sstevel@tonic-gate 			HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
1540*0Sstevel@tonic-gate 			    DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
1541*0Sstevel@tonic-gate 		} else {
1542*0Sstevel@tonic-gate 			HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
1543*0Sstevel@tonic-gate 			    wvp->xfer_size[ii]);
1544*0Sstevel@tonic-gate 		}
1545*0Sstevel@tonic-gate 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1546*0Sstevel@tonic-gate 		wv_descp->branch = 0;
1547*0Sstevel@tonic-gate 		wv_descp->status = (wvp->xfer_size[ii] <<
1548*0Sstevel@tonic-gate 		    DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
1549*0Sstevel@tonic-gate 		wvp->descriptors++;
1550*0Sstevel@tonic-gate 	}
1551*0Sstevel@tonic-gate 
1552*0Sstevel@tonic-gate 	/* allocate and copy descriptor block to dma memory */
1553*0Sstevel@tonic-gate 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1554*0Sstevel@tonic-gate 	    DDI_SUCCESS) {
1555*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1556*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1557*0Sstevel@tonic-gate 
1558*0Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above function call */
1559*0Sstevel@tonic-gate 		return;
1560*0Sstevel@tonic-gate 	}
1561*0Sstevel@tonic-gate 
1562*0Sstevel@tonic-gate 	/*
1563*0Sstevel@tonic-gate 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1564*0Sstevel@tonic-gate 	 * is last component)
1565*0Sstevel@tonic-gate 	 */
1566*0Sstevel@tonic-gate 	xctlp->dma[0].dma_bound = dma_desc_bound;
1567*0Sstevel@tonic-gate 	xctlp->dma[0].dma_descp =
1568*0Sstevel@tonic-gate 	    dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
1569*0Sstevel@tonic-gate 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1570*0Sstevel@tonic-gate 
1571*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1572*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1573*0Sstevel@tonic-gate }
1574*0Sstevel@tonic-gate 
1575*0Sstevel@tonic-gate /*
1576*0Sstevel@tonic-gate  * hci1394_bld_recv_buf_ppb_desc()
1577*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1578*0Sstevel@tonic-gate  *    in packet per buffer mode.
1579*0Sstevel@tonic-gate  */
1580*0Sstevel@tonic-gate static void
hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t * wvp)1581*0Sstevel@tonic-gate hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
1582*0Sstevel@tonic-gate {
1583*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
1584*0Sstevel@tonic-gate 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1585*0Sstevel@tonic-gate 	caddr_t		dma_descp;
1586*0Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
1587*0Sstevel@tonic-gate 	uint32_t	pktsize;
1588*0Sstevel@tonic-gate 	uint32_t	pktcnt;
1589*0Sstevel@tonic-gate 	uint32_t	wait_for_sync;
1590*0Sstevel@tonic-gate 	uint32_t	ii;
1591*0Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1592*0Sstevel@tonic-gate 
1593*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_enter,
1594*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1595*0Sstevel@tonic-gate 
1596*0Sstevel@tonic-gate 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1597*0Sstevel@tonic-gate 
1598*0Sstevel@tonic-gate 	/* determine number and size of pkt desc blocks to create */
1599*0Sstevel@tonic-gate 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1600*0Sstevel@tonic-gate 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1601*0Sstevel@tonic-gate 
1602*0Sstevel@tonic-gate 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1603*0Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1604*0Sstevel@tonic-gate 
1605*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1606*0Sstevel@tonic-gate 
1607*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_recv_buf_ppb_desc_mem_alloc_fail,
1608*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1609*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1610*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1611*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1612*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1613*0Sstevel@tonic-gate 		return;
1614*0Sstevel@tonic-gate 	}
1615*0Sstevel@tonic-gate 
1616*0Sstevel@tonic-gate 	/*
1617*0Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
1618*0Sstevel@tonic-gate 	 * current IXL xfer cmd
1619*0Sstevel@tonic-gate 	 */
1620*0Sstevel@tonic-gate 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1621*0Sstevel@tonic-gate 
1622*0Sstevel@tonic-gate 	/*
1623*0Sstevel@tonic-gate 	 * if enabled, set wait for sync flag in first descriptor in
1624*0Sstevel@tonic-gate 	 * descriptor block
1625*0Sstevel@tonic-gate 	 */
1626*0Sstevel@tonic-gate 	if (wvp->ixl_setsyncwait_cnt > 0) {
1627*0Sstevel@tonic-gate 		wvp->ixl_setsyncwait_cnt = 1;
1628*0Sstevel@tonic-gate 		wait_for_sync = DESC_W_ENBL;
1629*0Sstevel@tonic-gate 	} else {
1630*0Sstevel@tonic-gate 		wait_for_sync = DESC_W_DSABL;
1631*0Sstevel@tonic-gate 	}
1632*0Sstevel@tonic-gate 
1633*0Sstevel@tonic-gate 	/* create first descriptor block for this recv packet */
1634*0Sstevel@tonic-gate 	/* consists of one descriptor and xfer status is enabled */
1635*0Sstevel@tonic-gate 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
1636*0Sstevel@tonic-gate 	HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1637*0Sstevel@tonic-gate 	    wait_for_sync, pktsize);
1638*0Sstevel@tonic-gate 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1639*0Sstevel@tonic-gate 	wv_descp->branch = 0;
1640*0Sstevel@tonic-gate 	wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
1641*0Sstevel@tonic-gate 	    DESC_ST_RESCOUNT_MASK;
1642*0Sstevel@tonic-gate 	wvp->descriptors++;
1643*0Sstevel@tonic-gate 
1644*0Sstevel@tonic-gate 	/* useful debug trace info - IXL command, and packet count and size */
1645*0Sstevel@tonic-gate 	TNF_PROBE_3_DEBUG(hci1394_bld_recv_buf_ppb_desc_recv_buf_info,
1646*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ixl_commandp,
1647*0Sstevel@tonic-gate 	    wvp->ixl_cur_xfer_stp, tnf_int, pkt_count, pktcnt, tnf_int,
1648*0Sstevel@tonic-gate 	    pkt_size, pktsize);
1649*0Sstevel@tonic-gate 
1650*0Sstevel@tonic-gate 	/*
1651*0Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as there are
1652*0Sstevel@tonic-gate 	 * recv pkts
1653*0Sstevel@tonic-gate 	 */
1654*0Sstevel@tonic-gate 	for (ii = 0; ii < pktcnt; ii++) {
1655*0Sstevel@tonic-gate 
1656*0Sstevel@tonic-gate 		/* if about to create last descriptor block */
1657*0Sstevel@tonic-gate 		if (ii == (pktcnt - 1)) {
1658*0Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
1659*0Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
1660*0Sstevel@tonic-gate 			    DDI_SUCCESS) {
1661*0Sstevel@tonic-gate 				TNF_PROBE_1_DEBUG(
1662*0Sstevel@tonic-gate 				    hci1394_bld_recv_buf_ppb_desc_fl_error,
1663*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int,
1664*0Sstevel@tonic-gate 				    for_ii, ii);
1665*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
1666*0Sstevel@tonic-gate 				    hci1394_bld_recv_buf_ppb_desc_exit,
1667*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1668*0Sstevel@tonic-gate 
1669*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
1670*0Sstevel@tonic-gate 				return;
1671*0Sstevel@tonic-gate 			}
1672*0Sstevel@tonic-gate 		}
1673*0Sstevel@tonic-gate 
1674*0Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
1675*0Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1676*0Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
1677*0Sstevel@tonic-gate 
1678*0Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1679*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1680*0Sstevel@tonic-gate 
1681*0Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
1682*0Sstevel@tonic-gate 			return;
1683*0Sstevel@tonic-gate 		}
1684*0Sstevel@tonic-gate 
1685*0Sstevel@tonic-gate 		/*
1686*0Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl struct (unbound addr (kernel
1687*0Sstevel@tonic-gate 		 * virtual) is last component (descriptor))
1688*0Sstevel@tonic-gate 		 */
1689*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
1690*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp;
1691*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
1692*0Sstevel@tonic-gate 
1693*0Sstevel@tonic-gate 		/* advance buffer ptr by pktsize in descriptor block */
1694*0Sstevel@tonic-gate 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1695*0Sstevel@tonic-gate 		    pktsize;
1696*0Sstevel@tonic-gate 	}
1697*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1698*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1699*0Sstevel@tonic-gate }
1700*0Sstevel@tonic-gate 
1701*0Sstevel@tonic-gate /*
1702*0Sstevel@tonic-gate  * hci1394_bld_recv_buf_fill_desc()
1703*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1704*0Sstevel@tonic-gate  *    in buffer fill mode.
1705*0Sstevel@tonic-gate  */
1706*0Sstevel@tonic-gate static void
hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
{
	hci1394_xfer_ctl_t	*xctlp;
	caddr_t			dma_descp;	/* kernel virtual addr of dma copy */
	uint32_t		dma_desc_bound;	/* bus addr of dma copy (incl. Z bits) */
	uint32_t		wait_for_sync;
	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;

	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/*
	 * NOTE(review): treats the current IXL xfer start cmd as an
	 * ixl1394_xfer_buf_t -- assumes the caller/compiler state machine
	 * guarantees the opcode matches; confirm at call sites.
	 */
	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;


	/* allocate an xfer_ctl struct including 1 xfer_ctl_dma structs */
	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {

		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;

		TNF_PROBE_2(hci1394_bld_recv_buf_fill_desc_mem_alloc_fail,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "IXL1394_EMEM_ALLOC_FAIL: xfer_ctl", tnf_opaque,
		    ixl_commandp, wvp->ixl_cur_xfer_stp);
		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return;
	}

	/*
	 * save xfer_ctl struct addr in compiler_privatep of
	 * current IXL xfer cmd
	 */
	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;

	/*
	 * if enabled, set wait for sync flag in first descriptor of
	 * descriptor block
	 */
	if (wvp->ixl_setsyncwait_cnt > 0) {
		/* consume the pending setsyncwait request(s) as a single wait */
		wvp->ixl_setsyncwait_cnt = 1;
		wait_for_sync = DESC_W_ENBL;
	} else {
		wait_for_sync = DESC_W_DSABL;
	}

	/*
	 * create descriptor block for this buffer fill mode recv command which
	 * consists of one descriptor with xfer status enabled
	 */
	HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
	    DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);

	wvp->descriptor_block[wvp->descriptors].data_addr =
	    local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
	wvp->descriptor_block[wvp->descriptors].branch = 0;
	/* seed residual count with the full buffer size (nothing recv'd yet) */
	wvp->descriptor_block[wvp->descriptors].status =
	    (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
	    DESC_ST_RESCOUNT_MASK;
	wvp->descriptors++;

	/* check and perform any required hci cache flush */
	if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* wvp->dma_bld_error is set by above call */
		return;
	}

	/* allocate and copy descriptor block to dma memory */
	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
	    != DDI_SUCCESS) {
		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* wvp->dma_bld_error is set by above call */
		return;
	}

	/*
	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
	 * is last component.
	 */
	xctlp->dma[0].dma_bound = dma_desc_bound;
	xctlp->dma[0].dma_descp = dma_descp;
	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;

	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}
1797*0Sstevel@tonic-gate 
1798*0Sstevel@tonic-gate /*
1799*0Sstevel@tonic-gate  * hci1394_bld_xmit_pkt_desc()
1800*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a transmit packet.
1801*0Sstevel@tonic-gate  */
1802*0Sstevel@tonic-gate static void
hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t * wvp)1803*0Sstevel@tonic-gate hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1804*0Sstevel@tonic-gate {
1805*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t *xctlp;
1806*0Sstevel@tonic-gate 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1807*0Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1808*0Sstevel@tonic-gate 	caddr_t		dma_descp;	/* dma bound memory for descriptor */
1809*0Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
1810*0Sstevel@tonic-gate 	uint32_t	ii;
1811*0Sstevel@tonic-gate 
1812*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_enter,
1813*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1814*0Sstevel@tonic-gate 
1815*0Sstevel@tonic-gate 	/*
1816*0Sstevel@tonic-gate 	 * is error if number of descriptors to be built exceeds maximum
1817*0Sstevel@tonic-gate 	 * descriptors allowed in a descriptor block. Add 2 for the overhead
1818*0Sstevel@tonic-gate 	 * of the OMORE-Immediate.
1819*0Sstevel@tonic-gate 	 */
1820*0Sstevel@tonic-gate 	if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1821*0Sstevel@tonic-gate 
1822*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1823*0Sstevel@tonic-gate 
1824*0Sstevel@tonic-gate 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_fragment_oflo_error,
1825*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1826*0Sstevel@tonic-gate 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1827*0Sstevel@tonic-gate 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1828*0Sstevel@tonic-gate 		    wvp->descriptors + 2 + wvp->xfer_bufcnt);
1829*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1830*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1831*0Sstevel@tonic-gate 		return;
1832*0Sstevel@tonic-gate 	}
1833*0Sstevel@tonic-gate 
1834*0Sstevel@tonic-gate 	/* is error if total packet length exceeds 0xFFFF */
1835*0Sstevel@tonic-gate 	if (wvp->xfer_pktlen > 0xFFFF) {
1836*0Sstevel@tonic-gate 
1837*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
1838*0Sstevel@tonic-gate 
1839*0Sstevel@tonic-gate 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_packet_oflo_error,
1840*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1841*0Sstevel@tonic-gate 		    "IXL1394_EPKTSIZE_MAX_OFLO", tnf_opaque, ixl_commandp,
1842*0Sstevel@tonic-gate 		    wvp->ixl_cur_xfer_stp, tnf_int, total_pktlen,
1843*0Sstevel@tonic-gate 		    wvp->xfer_pktlen);
1844*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1845*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1846*0Sstevel@tonic-gate 		return;
1847*0Sstevel@tonic-gate 	}
1848*0Sstevel@tonic-gate 
1849*0Sstevel@tonic-gate 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1850*0Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1851*0Sstevel@tonic-gate 
1852*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1853*0Sstevel@tonic-gate 
1854*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_xmit_pkt_desc_mem_alloc_fail,
1855*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1856*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1857*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_cmdp);
1858*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1859*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1860*0Sstevel@tonic-gate 		return;
1861*0Sstevel@tonic-gate 	}
1862*0Sstevel@tonic-gate 
1863*0Sstevel@tonic-gate 	/*
1864*0Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
1865*0Sstevel@tonic-gate 	 * current IXL xfer cmd
1866*0Sstevel@tonic-gate 	 */
1867*0Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1868*0Sstevel@tonic-gate 
1869*0Sstevel@tonic-gate 	/* generate values for the xmit pkt hdrs */
1870*0Sstevel@tonic-gate 	hci1394_set_xmit_pkt_hdr(wvp);
1871*0Sstevel@tonic-gate 
1872*0Sstevel@tonic-gate 	/*
1873*0Sstevel@tonic-gate 	 * xmit pkt starts with an output more immediate,
1874*0Sstevel@tonic-gate 	 * a double sized hci1394_desc
1875*0Sstevel@tonic-gate 	 */
1876*0Sstevel@tonic-gate 	wv_omi_descp = (hci1394_output_more_imm_t *)
1877*0Sstevel@tonic-gate 	    (&wvp->descriptor_block[wvp->descriptors]);
1878*0Sstevel@tonic-gate 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1879*0Sstevel@tonic-gate 
1880*0Sstevel@tonic-gate 	wv_omi_descp->data_addr = 0;
1881*0Sstevel@tonic-gate 	wv_omi_descp->branch = 0;
1882*0Sstevel@tonic-gate 	wv_omi_descp->status = 0;
1883*0Sstevel@tonic-gate 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1884*0Sstevel@tonic-gate 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1885*0Sstevel@tonic-gate 	wv_omi_descp->q3 = 0;
1886*0Sstevel@tonic-gate 	wv_omi_descp->q4 = 0;
1887*0Sstevel@tonic-gate 
1888*0Sstevel@tonic-gate 	wvp->descriptors += 2;
1889*0Sstevel@tonic-gate 
1890*0Sstevel@tonic-gate 	/*
1891*0Sstevel@tonic-gate 	 * create the required output more hci1394_desc descriptor, then create
1892*0Sstevel@tonic-gate 	 * an output last hci1394_desc descriptor with xfer status enabled
1893*0Sstevel@tonic-gate 	 */
1894*0Sstevel@tonic-gate 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1895*0Sstevel@tonic-gate 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1896*0Sstevel@tonic-gate 
1897*0Sstevel@tonic-gate 		if (ii == (wvp->xfer_bufcnt - 1)) {
1898*0Sstevel@tonic-gate 			HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
1899*0Sstevel@tonic-gate 			    DESC_INTR_DSABL, wvp->xfer_size[ii]);
1900*0Sstevel@tonic-gate 		} else {
1901*0Sstevel@tonic-gate 			HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
1902*0Sstevel@tonic-gate 		}
1903*0Sstevel@tonic-gate 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1904*0Sstevel@tonic-gate 		wv_descp->branch = 0;
1905*0Sstevel@tonic-gate 		wv_descp->status = 0;
1906*0Sstevel@tonic-gate 		wvp->descriptors++;
1907*0Sstevel@tonic-gate 	}
1908*0Sstevel@tonic-gate 
1909*0Sstevel@tonic-gate 	/* allocate and copy descriptor block to dma memory */
1910*0Sstevel@tonic-gate 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1911*0Sstevel@tonic-gate 	    DDI_SUCCESS) {
1912*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1913*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1914*0Sstevel@tonic-gate 
1915*0Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
1916*0Sstevel@tonic-gate 		return;
1917*0Sstevel@tonic-gate 	}
1918*0Sstevel@tonic-gate 
1919*0Sstevel@tonic-gate 	/*
1920*0Sstevel@tonic-gate 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1921*0Sstevel@tonic-gate 	 * is last component (descriptor))
1922*0Sstevel@tonic-gate 	 */
1923*0Sstevel@tonic-gate 	xctlp->dma[0].dma_bound = dma_desc_bound;
1924*0Sstevel@tonic-gate 	xctlp->dma[0].dma_descp =
1925*0Sstevel@tonic-gate 	    dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
1926*0Sstevel@tonic-gate 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1927*0Sstevel@tonic-gate 
1928*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1929*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1930*0Sstevel@tonic-gate }
1931*0Sstevel@tonic-gate 
1932*0Sstevel@tonic-gate /*
1933*0Sstevel@tonic-gate  * hci1394_bld_xmit_buf_desc()
1934*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor blocks for a transmit buffer.
1935*0Sstevel@tonic-gate  */
1936*0Sstevel@tonic-gate static void
hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t * wvp)1937*0Sstevel@tonic-gate hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
1938*0Sstevel@tonic-gate {
1939*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
1940*0Sstevel@tonic-gate 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1941*0Sstevel@tonic-gate 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1942*0Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1943*0Sstevel@tonic-gate 	caddr_t		dma_descp;
1944*0Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
1945*0Sstevel@tonic-gate 	uint32_t	pktsize;
1946*0Sstevel@tonic-gate 	uint32_t	pktcnt;
1947*0Sstevel@tonic-gate 	uint32_t	ii;
1948*0Sstevel@tonic-gate 
1949*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_enter,
1950*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1951*0Sstevel@tonic-gate 
1952*0Sstevel@tonic-gate 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1953*0Sstevel@tonic-gate 
1954*0Sstevel@tonic-gate 	/* determine number and size of pkt desc blocks to create */
1955*0Sstevel@tonic-gate 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1956*0Sstevel@tonic-gate 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1957*0Sstevel@tonic-gate 
1958*0Sstevel@tonic-gate 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1959*0Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1960*0Sstevel@tonic-gate 
1961*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1962*0Sstevel@tonic-gate 
1963*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_xmit_buf_desc_mem_alloc_fail,
1964*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1965*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1966*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_cmdp);
1967*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
1968*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1969*0Sstevel@tonic-gate 		return;
1970*0Sstevel@tonic-gate 	}
1971*0Sstevel@tonic-gate 
1972*0Sstevel@tonic-gate 	/*
1973*0Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
1974*0Sstevel@tonic-gate 	 * current IXL xfer cmd
1975*0Sstevel@tonic-gate 	 */
1976*0Sstevel@tonic-gate 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1977*0Sstevel@tonic-gate 
1978*0Sstevel@tonic-gate 	/* generate values for the xmit pkt hdrs */
1979*0Sstevel@tonic-gate 	wvp->xfer_pktlen = pktsize;
1980*0Sstevel@tonic-gate 	hci1394_set_xmit_pkt_hdr(wvp);
1981*0Sstevel@tonic-gate 
1982*0Sstevel@tonic-gate 	/*
1983*0Sstevel@tonic-gate 	 * xmit pkt starts with an output more immediate,
1984*0Sstevel@tonic-gate 	 * a double sized hci1394_desc
1985*0Sstevel@tonic-gate 	 */
1986*0Sstevel@tonic-gate 	wv_omi_descp = (hci1394_output_more_imm_t *)
1987*0Sstevel@tonic-gate 	    &wvp->descriptor_block[wvp->descriptors];
1988*0Sstevel@tonic-gate 
1989*0Sstevel@tonic-gate 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1990*0Sstevel@tonic-gate 
1991*0Sstevel@tonic-gate 	wv_omi_descp->data_addr = 0;
1992*0Sstevel@tonic-gate 	wv_omi_descp->branch = 0;
1993*0Sstevel@tonic-gate 	wv_omi_descp->status = 0;
1994*0Sstevel@tonic-gate 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1995*0Sstevel@tonic-gate 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1996*0Sstevel@tonic-gate 	wv_omi_descp->q3 = 0;
1997*0Sstevel@tonic-gate 	wv_omi_descp->q4 = 0;
1998*0Sstevel@tonic-gate 
1999*0Sstevel@tonic-gate 	wvp->descriptors += 2;
2000*0Sstevel@tonic-gate 
2001*0Sstevel@tonic-gate 	/* follow with a single output last descriptor w/status enabled */
2002*0Sstevel@tonic-gate 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
2003*0Sstevel@tonic-gate 	HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
2004*0Sstevel@tonic-gate 	    pktsize);
2005*0Sstevel@tonic-gate 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
2006*0Sstevel@tonic-gate 	wv_descp->branch = 0;
2007*0Sstevel@tonic-gate 	wv_descp->status = 0;
2008*0Sstevel@tonic-gate 	wvp->descriptors++;
2009*0Sstevel@tonic-gate 
2010*0Sstevel@tonic-gate 	/*
2011*0Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as there are
2012*0Sstevel@tonic-gate 	 * xmit packets
2013*0Sstevel@tonic-gate 	 */
2014*0Sstevel@tonic-gate 	for (ii = 0; ii < pktcnt; ii++) {
2015*0Sstevel@tonic-gate 
2016*0Sstevel@tonic-gate 		/* if about to create last descriptor block */
2017*0Sstevel@tonic-gate 		if (ii == (pktcnt - 1)) {
2018*0Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
2019*0Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2020*0Sstevel@tonic-gate 			    DDI_SUCCESS) {
2021*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
2022*0Sstevel@tonic-gate 				    hci1394_bld_xmit_buf_desc_exit,
2023*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2024*0Sstevel@tonic-gate 
2025*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
2026*0Sstevel@tonic-gate 				return;
2027*0Sstevel@tonic-gate 			}
2028*0Sstevel@tonic-gate 		}
2029*0Sstevel@tonic-gate 
2030*0Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
2031*0Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2032*0Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
2033*0Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2034*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2035*0Sstevel@tonic-gate 
2036*0Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
2037*0Sstevel@tonic-gate 			return;
2038*0Sstevel@tonic-gate 		}
2039*0Sstevel@tonic-gate 
2040*0Sstevel@tonic-gate 		/*
2041*0Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl structure (unbound addr
2042*0Sstevel@tonic-gate 		 * (kernel virtual) is last component (descriptor))
2043*0Sstevel@tonic-gate 		 */
2044*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2045*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp + 2 *
2046*0Sstevel@tonic-gate 		    sizeof (hci1394_desc_t);
2047*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2048*0Sstevel@tonic-gate 
2049*0Sstevel@tonic-gate 		/* advance buffer ptr by pktsize in descriptor block */
2050*0Sstevel@tonic-gate 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
2051*0Sstevel@tonic-gate 		    pktsize;
2052*0Sstevel@tonic-gate 	}
2053*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2054*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2055*0Sstevel@tonic-gate }
2056*0Sstevel@tonic-gate 
2057*0Sstevel@tonic-gate /*
2058*0Sstevel@tonic-gate  * hci1394_bld_xmit_hdronly_nopkt_desc()
2059*0Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor blocks for transmitting
2060*0Sstevel@tonic-gate  *    a packet consisting of an isochronous header with no data payload,
2061*0Sstevel@tonic-gate  *    or for not sending a packet at all for a cycle.
2062*0Sstevel@tonic-gate  *
2063*0Sstevel@tonic-gate  *    A Store_Value openhci descriptor is built at the start of each
2064*0Sstevel@tonic-gate  *    IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
2065*0Sstevel@tonic-gate  *    descriptor block (to allow for skip cycle specification and set skipmode
2066*0Sstevel@tonic-gate  *    processing for these commands).
2067*0Sstevel@tonic-gate  */
2068*0Sstevel@tonic-gate static void
hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t * wvp)2069*0Sstevel@tonic-gate hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
2070*0Sstevel@tonic-gate {
2071*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
2072*0Sstevel@tonic-gate 	hci1394_output_last_t	*wv_ol_descp; /* shorthand to local descrp */
2073*0Sstevel@tonic-gate 	hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
2074*0Sstevel@tonic-gate 	caddr_t		dma_descp;
2075*0Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
2076*0Sstevel@tonic-gate 	uint32_t	repcnt;
2077*0Sstevel@tonic-gate 	uint32_t	ii;
2078*0Sstevel@tonic-gate 
2079*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_enter,
2080*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2081*0Sstevel@tonic-gate 
2082*0Sstevel@tonic-gate 	/* determine # of instances of output hdronly/nopkt to generate */
2083*0Sstevel@tonic-gate 	repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
2084*0Sstevel@tonic-gate 
2085*0Sstevel@tonic-gate 	/*
2086*0Sstevel@tonic-gate 	 * allocate an xfer_ctl structure which includes repcnt
2087*0Sstevel@tonic-gate 	 * xfer_ctl_dma structs
2088*0Sstevel@tonic-gate 	 */
2089*0Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
2090*0Sstevel@tonic-gate 
2091*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2092*0Sstevel@tonic-gate 
2093*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_xmit_hdronly_nopkt_desc_mem_alloc_fail,
2094*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2095*0Sstevel@tonic-gate 		    "IXL EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
2096*0Sstevel@tonic-gate 		    ixl_commandp, wvp->ixl_cur_cmdp);
2097*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2098*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2099*0Sstevel@tonic-gate 		return;
2100*0Sstevel@tonic-gate 	}
2101*0Sstevel@tonic-gate 
2102*0Sstevel@tonic-gate 	/*
2103*0Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
2104*0Sstevel@tonic-gate 	 * current IXL xfer command
2105*0Sstevel@tonic-gate 	 */
2106*0Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
2107*0Sstevel@tonic-gate 
2108*0Sstevel@tonic-gate 	/*
2109*0Sstevel@tonic-gate 	 * create a storevalue descriptor
2110*0Sstevel@tonic-gate 	 * (will be used for skip vs jump processing)
2111*0Sstevel@tonic-gate 	 */
2112*0Sstevel@tonic-gate 	hci1394_set_xmit_storevalue_desc(wvp);
2113*0Sstevel@tonic-gate 
2114*0Sstevel@tonic-gate 	/*
2115*0Sstevel@tonic-gate 	 * processing now based on opcode:
2116*0Sstevel@tonic-gate 	 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
2117*0Sstevel@tonic-gate 	 */
2118*0Sstevel@tonic-gate 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
2119*0Sstevel@tonic-gate 	    IXL1394_OP_SEND_HDR_ONLY) {
2120*0Sstevel@tonic-gate 
2121*0Sstevel@tonic-gate 		/* for header only, generate values for the xmit pkt hdrs */
2122*0Sstevel@tonic-gate 		hci1394_set_xmit_pkt_hdr(wvp);
2123*0Sstevel@tonic-gate 
2124*0Sstevel@tonic-gate 		/*
2125*0Sstevel@tonic-gate 		 * create an output last immediate (double sized) descriptor
2126*0Sstevel@tonic-gate 		 * xfer status enabled
2127*0Sstevel@tonic-gate 		 */
2128*0Sstevel@tonic-gate 		wv_oli_descp = (hci1394_output_last_imm_t *)
2129*0Sstevel@tonic-gate 		    &wvp->descriptor_block[wvp->descriptors];
2130*0Sstevel@tonic-gate 
2131*0Sstevel@tonic-gate 		HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
2132*0Sstevel@tonic-gate 		    DESC_INTR_DSABL);
2133*0Sstevel@tonic-gate 
2134*0Sstevel@tonic-gate 		wv_oli_descp->data_addr = 0;
2135*0Sstevel@tonic-gate 		wv_oli_descp->branch = 0;
2136*0Sstevel@tonic-gate 		wv_oli_descp->status = 0;
2137*0Sstevel@tonic-gate 		wv_oli_descp->q1 = wvp->xmit_pkthdr1;
2138*0Sstevel@tonic-gate 		wv_oli_descp->q2 = wvp->xmit_pkthdr2;
2139*0Sstevel@tonic-gate 		wv_oli_descp->q3 = 0;
2140*0Sstevel@tonic-gate 		wv_oli_descp->q4 = 0;
2141*0Sstevel@tonic-gate 		wvp->descriptors += 2;
2142*0Sstevel@tonic-gate 	} else {
2143*0Sstevel@tonic-gate 		/*
2144*0Sstevel@tonic-gate 		 * for skip cycle, create a single output last descriptor
2145*0Sstevel@tonic-gate 		 * with xfer status enabled
2146*0Sstevel@tonic-gate 		 */
2147*0Sstevel@tonic-gate 		wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
2148*0Sstevel@tonic-gate 		HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
2149*0Sstevel@tonic-gate 		    DESC_INTR_DSABL, 0);
2150*0Sstevel@tonic-gate 		wv_ol_descp->data_addr = 0;
2151*0Sstevel@tonic-gate 		wv_ol_descp->branch = 0;
2152*0Sstevel@tonic-gate 		wv_ol_descp->status = 0;
2153*0Sstevel@tonic-gate 		wvp->descriptors++;
2154*0Sstevel@tonic-gate 	}
2155*0Sstevel@tonic-gate 
2156*0Sstevel@tonic-gate 	/*
2157*0Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as repeat count
2158*0Sstevel@tonic-gate 	 * indicates
2159*0Sstevel@tonic-gate 	 */
2160*0Sstevel@tonic-gate 	for (ii = 0; ii < repcnt; ii++) {
2161*0Sstevel@tonic-gate 
2162*0Sstevel@tonic-gate 		/* if about to create last descriptor block */
2163*0Sstevel@tonic-gate 		if (ii == (repcnt - 1)) {
2164*0Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
2165*0Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2166*0Sstevel@tonic-gate 			    DDI_SUCCESS) {
2167*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
2168*0Sstevel@tonic-gate 				    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2169*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2170*0Sstevel@tonic-gate 
2171*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
2172*0Sstevel@tonic-gate 				return;
2173*0Sstevel@tonic-gate 			}
2174*0Sstevel@tonic-gate 		}
2175*0Sstevel@tonic-gate 
2176*0Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
2177*0Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2178*0Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
2179*0Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(
2180*0Sstevel@tonic-gate 			    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2181*0Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2182*0Sstevel@tonic-gate 
2183*0Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
2184*0Sstevel@tonic-gate 			return;
2185*0Sstevel@tonic-gate 		}
2186*0Sstevel@tonic-gate 
2187*0Sstevel@tonic-gate 		/*
2188*0Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl structure (unbound addr
2189*0Sstevel@tonic-gate 		 * (kernel virtual) is last component (descriptor)
2190*0Sstevel@tonic-gate 		 */
2191*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2192*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
2193*0Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2194*0Sstevel@tonic-gate 	}
2195*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2196*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2197*0Sstevel@tonic-gate }
2198*0Sstevel@tonic-gate 
2199*0Sstevel@tonic-gate /*
2200*0Sstevel@tonic-gate  * hci1394_bld_dma_mem_desc_blk()
2201*0Sstevel@tonic-gate  *    Used to put a given OpenHCI descriptor block into dma bound memory.
2202*0Sstevel@tonic-gate  */
2203*0Sstevel@tonic-gate static int
hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t * wvp,caddr_t * dma_descpp,uint32_t * dma_desc_bound)2204*0Sstevel@tonic-gate hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
2205*0Sstevel@tonic-gate     uint32_t *dma_desc_bound)
2206*0Sstevel@tonic-gate {
2207*0Sstevel@tonic-gate 	uint32_t	dma_bound;
2208*0Sstevel@tonic-gate 
2209*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_enter,
2210*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2211*0Sstevel@tonic-gate 
2212*0Sstevel@tonic-gate 	/* set internal error if no descriptor blocks to build */
2213*0Sstevel@tonic-gate 	if (wvp->descriptors == 0) {
2214*0Sstevel@tonic-gate 
2215*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
2216*0Sstevel@tonic-gate 
2217*0Sstevel@tonic-gate 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_error,
2218*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2219*0Sstevel@tonic-gate 		    "IXL1394_INTERNAL_ERROR: no descriptors to build");
2220*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2221*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2222*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2223*0Sstevel@tonic-gate 	}
2224*0Sstevel@tonic-gate 
2225*0Sstevel@tonic-gate 	/* allocate dma memory and move this descriptor block to it */
2226*0Sstevel@tonic-gate 	*dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
2227*0Sstevel@tonic-gate 	    sizeof (hci1394_desc_t), &dma_bound);
2228*0Sstevel@tonic-gate 
2229*0Sstevel@tonic-gate 	if (*dma_descpp == NULL) {
2230*0Sstevel@tonic-gate 
2231*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2232*0Sstevel@tonic-gate 
2233*0Sstevel@tonic-gate 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_fail,
2234*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2235*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for descriptors");
2236*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2237*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2238*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2239*0Sstevel@tonic-gate 	}
2240*0Sstevel@tonic-gate #ifdef _KERNEL
2241*0Sstevel@tonic-gate 	ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
2242*0Sstevel@tonic-gate 	    (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
2243*0Sstevel@tonic-gate 	    wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
2244*0Sstevel@tonic-gate 	    DDI_DEV_AUTOINCR);
2245*0Sstevel@tonic-gate #else
2246*0Sstevel@tonic-gate 	bcopy(wvp->descriptor_block, *dma_descpp,
2247*0Sstevel@tonic-gate 	    wvp->descriptors * sizeof (hci1394_desc_t));
2248*0Sstevel@tonic-gate #endif
2249*0Sstevel@tonic-gate 	/*
2250*0Sstevel@tonic-gate 	 * convert allocated block's memory address to bus address space
2251*0Sstevel@tonic-gate 	 * include properly set Z bits (descriptor count).
2252*0Sstevel@tonic-gate 	 */
2253*0Sstevel@tonic-gate 	*dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
2254*0Sstevel@tonic-gate 
2255*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2256*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2257*0Sstevel@tonic-gate 
2258*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2259*0Sstevel@tonic-gate }
2260*0Sstevel@tonic-gate 
2261*0Sstevel@tonic-gate /*
2262*0Sstevel@tonic-gate  * hci1394_set_xmit_pkt_hdr()
2263*0Sstevel@tonic-gate  *    Compose the 2 quadlets for the xmit packet header.
2264*0Sstevel@tonic-gate  */
2265*0Sstevel@tonic-gate static void
hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t * wvp)2266*0Sstevel@tonic-gate hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
2267*0Sstevel@tonic-gate {
2268*0Sstevel@tonic-gate 	uint16_t tag;
2269*0Sstevel@tonic-gate 	uint16_t sync;
2270*0Sstevel@tonic-gate 
2271*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_enter,
2272*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2273*0Sstevel@tonic-gate 
2274*0Sstevel@tonic-gate 	/*
2275*0Sstevel@tonic-gate 	 * choose tag and sync bits for header either from default values or
2276*0Sstevel@tonic-gate 	 * from currently active set tag and sync IXL command
2277*0Sstevel@tonic-gate 	 * (clear command after use)
2278*0Sstevel@tonic-gate 	 */
2279*0Sstevel@tonic-gate 	if (wvp->ixl_settagsync_cmdp == NULL) {
2280*0Sstevel@tonic-gate 		tag = wvp->default_tag;
2281*0Sstevel@tonic-gate 		sync = wvp->default_sync;
2282*0Sstevel@tonic-gate 	} else {
2283*0Sstevel@tonic-gate 		tag = wvp->ixl_settagsync_cmdp->tag;
2284*0Sstevel@tonic-gate 		sync = wvp->ixl_settagsync_cmdp->sync;
2285*0Sstevel@tonic-gate 		wvp->ixl_settagsync_cmdp = NULL;
2286*0Sstevel@tonic-gate 	}
2287*0Sstevel@tonic-gate 	tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
2288*0Sstevel@tonic-gate 	sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
2289*0Sstevel@tonic-gate 
2290*0Sstevel@tonic-gate 	/*
2291*0Sstevel@tonic-gate 	 * build xmit pkt header -
2292*0Sstevel@tonic-gate 	 * hdr1 has speed, tag, channel number and sync bits
2293*0Sstevel@tonic-gate 	 * hdr2 has the packet length.
2294*0Sstevel@tonic-gate 	 */
2295*0Sstevel@tonic-gate 	wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
2296*0Sstevel@tonic-gate 	    (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
2297*0Sstevel@tonic-gate 	    DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
2298*0Sstevel@tonic-gate 	    DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
2299*0Sstevel@tonic-gate 
2300*0Sstevel@tonic-gate 	wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
2301*0Sstevel@tonic-gate 
2302*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_exit,
2303*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2304*0Sstevel@tonic-gate }
2305*0Sstevel@tonic-gate 
2306*0Sstevel@tonic-gate /*
2307*0Sstevel@tonic-gate  * hci1394_set_xmit_skip_mode()
2308*0Sstevel@tonic-gate  *    Set current skip mode from default or from currently active command.
2309*0Sstevel@tonic-gate  *    If non-default skip mode command's skip mode is skip to label, find
2310*0Sstevel@tonic-gate  *    and set xfer start IXL command which follows skip to label into
2311*0Sstevel@tonic-gate  *    compiler_privatep of set skipmode IXL command.
2312*0Sstevel@tonic-gate  */
2313*0Sstevel@tonic-gate static void
hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t * wvp)2314*0Sstevel@tonic-gate hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
2315*0Sstevel@tonic-gate {
2316*0Sstevel@tonic-gate 	int err;
2317*0Sstevel@tonic-gate 
2318*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_enter,
2319*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2320*0Sstevel@tonic-gate 
2321*0Sstevel@tonic-gate 	if (wvp->ixl_setskipmode_cmdp == NULL) {
2322*0Sstevel@tonic-gate 		wvp->skipmode = wvp->default_skipmode;
2323*0Sstevel@tonic-gate 		wvp->skiplabelp = wvp->default_skiplabelp;
2324*0Sstevel@tonic-gate 		wvp->skipxferp = wvp->default_skipxferp;
2325*0Sstevel@tonic-gate 	} else {
2326*0Sstevel@tonic-gate 		wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
2327*0Sstevel@tonic-gate 		wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
2328*0Sstevel@tonic-gate 		wvp->skipxferp = NULL;
2329*0Sstevel@tonic-gate 		if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
2330*0Sstevel@tonic-gate 			err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
2331*0Sstevel@tonic-gate 			    NULL, &wvp->skipxferp);
2332*0Sstevel@tonic-gate 			if (err == DDI_FAILURE) {
2333*0Sstevel@tonic-gate 				TNF_PROBE_2(hci1394_set_xmit_skip_mode_error,
2334*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
2335*0Sstevel@tonic-gate 				    errmsg, "IXL1394_ENO_DATA_PKTS: "
2336*0Sstevel@tonic-gate 				    "label<->jump loop detected for skiplabel "
2337*0Sstevel@tonic-gate 				    "w/no xfers", tnf_opaque, setskip_cmdp,
2338*0Sstevel@tonic-gate 				    wvp->ixl_setskipmode_cmdp);
2339*0Sstevel@tonic-gate 				wvp->skipxferp = NULL;
2340*0Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
2341*0Sstevel@tonic-gate 			}
2342*0Sstevel@tonic-gate 		}
2343*0Sstevel@tonic-gate 		wvp->ixl_setskipmode_cmdp->compiler_privatep =
2344*0Sstevel@tonic-gate 		    (void *)wvp->skipxferp;
2345*0Sstevel@tonic-gate 	}
2346*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_exit,
2347*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2348*0Sstevel@tonic-gate }
2349*0Sstevel@tonic-gate 
2350*0Sstevel@tonic-gate /*
2351*0Sstevel@tonic-gate  * hci1394_set_xmit_storevalue_desc()
2352*0Sstevel@tonic-gate  *    Set up store_value DMA descriptor.
2353*0Sstevel@tonic-gate  *    XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
2354*0Sstevel@tonic-gate  *    descriptor in the descriptor block (to handle skip mode processing)
2355*0Sstevel@tonic-gate  */
2356*0Sstevel@tonic-gate static void
hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t * wvp)2357*0Sstevel@tonic-gate hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
2358*0Sstevel@tonic-gate {
2359*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_enter,
2360*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2361*0Sstevel@tonic-gate 
2362*0Sstevel@tonic-gate 	wvp->descriptors++;
2363*0Sstevel@tonic-gate 
2364*0Sstevel@tonic-gate 	HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
2365*0Sstevel@tonic-gate 	    wvp->storevalue_data);
2366*0Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].data_addr =
2367*0Sstevel@tonic-gate 	    wvp->storevalue_bufp;
2368*0Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
2369*0Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].status = 0;
2370*0Sstevel@tonic-gate 
2371*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_exit,
2372*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2373*0Sstevel@tonic-gate }
2374*0Sstevel@tonic-gate 
2375*0Sstevel@tonic-gate /*
2376*0Sstevel@tonic-gate  * hci1394_set_next_xfer_buf()
2377*0Sstevel@tonic-gate  *    This routine adds the data buffer to the current wvp list.
2378*0Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2379*0Sstevel@tonic-gate  *    contains the error code.
2380*0Sstevel@tonic-gate  */
2381*0Sstevel@tonic-gate static int
hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t * wvp,uint32_t bufp,uint16_t size)2382*0Sstevel@tonic-gate hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
2383*0Sstevel@tonic-gate     uint16_t size)
2384*0Sstevel@tonic-gate {
2385*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_enter,
2386*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2387*0Sstevel@tonic-gate 
2388*0Sstevel@tonic-gate 	/* error if buffer pointer is null (size may be 0) */
2389*0Sstevel@tonic-gate 	if (bufp == NULL) {
2390*0Sstevel@tonic-gate 
2391*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
2392*0Sstevel@tonic-gate 
2393*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2394*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2395*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2396*0Sstevel@tonic-gate 	}
2397*0Sstevel@tonic-gate 
2398*0Sstevel@tonic-gate 	/* count new xfer buffer */
2399*0Sstevel@tonic-gate 	wvp->xfer_bufcnt++;
2400*0Sstevel@tonic-gate 
2401*0Sstevel@tonic-gate 	/* error if exceeds maximum xfer buffer components allowed */
2402*0Sstevel@tonic-gate 	if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
2403*0Sstevel@tonic-gate 
2404*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
2405*0Sstevel@tonic-gate 
2406*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_set_next_xfer_buf_error,
2407*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2408*0Sstevel@tonic-gate 		    "IXL1394_EFRAGMENT_OFLO", tnf_int, frag_count,
2409*0Sstevel@tonic-gate 		    wvp->xfer_bufcnt);
2410*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2411*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2412*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2413*0Sstevel@tonic-gate 	}
2414*0Sstevel@tonic-gate 
2415*0Sstevel@tonic-gate 	/* save xmit buffer and size */
2416*0Sstevel@tonic-gate 	wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
2417*0Sstevel@tonic-gate 	wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
2418*0Sstevel@tonic-gate 
2419*0Sstevel@tonic-gate 	/* accumulate total packet length */
2420*0Sstevel@tonic-gate 	wvp->xfer_pktlen += size;
2421*0Sstevel@tonic-gate 
2422*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2423*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2424*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2425*0Sstevel@tonic-gate }
2426*0Sstevel@tonic-gate 
2427*0Sstevel@tonic-gate /*
2428*0Sstevel@tonic-gate  * hci1394_flush_end_desc_check()
2429*0Sstevel@tonic-gate  *    Check if flush required before last descriptor block of a
2430*0Sstevel@tonic-gate  *    non-unary set generated by an xfer buff or xmit special command
2431*0Sstevel@tonic-gate  *    or a unary set provided no other flush has already been done.
2432*0Sstevel@tonic-gate  *
2433*0Sstevel@tonic-gate  *    hci flush is required if xfer is finalized by an updateable
2434*0Sstevel@tonic-gate  *    jump command.
2435*0Sstevel@tonic-gate  *
2436*0Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2437*0Sstevel@tonic-gate  *    will contain the error code.
2438*0Sstevel@tonic-gate  */
2439*0Sstevel@tonic-gate static int
hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t * wvp,uint32_t count)2440*0Sstevel@tonic-gate hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
2441*0Sstevel@tonic-gate {
2442*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_enter,
2443*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2444*0Sstevel@tonic-gate 
2445*0Sstevel@tonic-gate 	if ((count != 0) ||
2446*0Sstevel@tonic-gate 	    ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2447*0Sstevel@tonic-gate 		INITIATING_LBL)) == 0)) {
2448*0Sstevel@tonic-gate 
2449*0Sstevel@tonic-gate 		if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
2450*0Sstevel@tonic-gate 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
2451*0Sstevel@tonic-gate 
2452*0Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(
2453*0Sstevel@tonic-gate 				    hci1394_flush_end_desc_check_exit,
2454*0Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2455*0Sstevel@tonic-gate 
2456*0Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
2457*0Sstevel@tonic-gate 				return (DDI_FAILURE);
2458*0Sstevel@tonic-gate 			}
2459*0Sstevel@tonic-gate 		}
2460*0Sstevel@tonic-gate 	}
2461*0Sstevel@tonic-gate 
2462*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_exit,
2463*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2464*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2465*0Sstevel@tonic-gate }
2466*0Sstevel@tonic-gate 
2467*0Sstevel@tonic-gate /*
2468*0Sstevel@tonic-gate  * hci1394_flush_hci_cache()
2469*0Sstevel@tonic-gate  *    Sun hci controller (RIO) implementation specific processing!
2470*0Sstevel@tonic-gate  *
2471*0Sstevel@tonic-gate  *    Allocate dma memory for 1 hci descriptor block which will be left unused.
2472*0Sstevel@tonic-gate  *    During execution this will cause a break in the contiguous address space
2473*0Sstevel@tonic-gate  *    processing required by Sun's RIO implementation of the ohci controller and
2474*0Sstevel@tonic-gate  *    will require the controller to refetch the next descriptor block from
2475*0Sstevel@tonic-gate  *    host memory.
2476*0Sstevel@tonic-gate  *
2477*0Sstevel@tonic-gate  *    General rules for cache flush preceeding a descriptor block in dma memory:
2478*0Sstevel@tonic-gate  *    1. Current IXL Xfer Command Updateable Rule:
2479*0Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if it, or any of the
2480*0Sstevel@tonic-gate  *	    non-start IXL packet xfer commands associated with it, is flagged
2481*0Sstevel@tonic-gate  *	    updateable.
2482*0Sstevel@tonic-gate  *    2. Next IXL Xfer Command Indeterminate Rule:
2483*0Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if an IXL jump command
2484*0Sstevel@tonic-gate  *	    which is flagged updateable has finalized the current IXL xfer
2485*0Sstevel@tonic-gate  *	    command.
2486*0Sstevel@tonic-gate  *    3. Updateable IXL Set Command Rule:
2487*0Sstevel@tonic-gate  *	    Cache flush of an IXL xfer command is required if any of the IXL
2488*0Sstevel@tonic-gate  *	    "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
2489*0Sstevel@tonic-gate  *	    command (i.e. immediately preceeding it), is flagged updateable.
2490*0Sstevel@tonic-gate  *    4. Label Initiating Xfer Command Rule:
2491*0Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if it is initiated by a
2492*0Sstevel@tonic-gate  *	    label IXL command.  (This is to allow both a flush of the cache and
2493*0Sstevel@tonic-gate  *	    an interrupt to be generated easily and in close proximity to each
2494*0Sstevel@tonic-gate  *	    other.  This can make possible simpler more successful reset of
2495*0Sstevel@tonic-gate  *	    descriptor statuses, especially under circumstances where the cycle
2496*0Sstevel@tonic-gate  *	    of hci commands is short and/or there are no callbacks distributed
2497*0Sstevel@tonic-gate  *	    through the span of xfers, etc...  This is especially important for
2498*0Sstevel@tonic-gate  *	    input where statuses must be reset before execution cycles back
2499*0Sstevel@tonic-gate  *	    again.
2500*0Sstevel@tonic-gate  *
2501*0Sstevel@tonic-gate  *    Application of above rules:
2502*0Sstevel@tonic-gate  *    Packet mode IXL xfer commands:
2503*0Sstevel@tonic-gate  *	    If any of the above flush rules apply, flush cache should be done
2504*0Sstevel@tonic-gate  *	    immediately preceeding the generation of the dma descriptor block
2505*0Sstevel@tonic-gate  *	    for the packet xfer.
2506*0Sstevel@tonic-gate  *    Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
2507*0Sstevel@tonic-gate  *    SEND_HDR_ONLY, and SEND_NO_PKT):
2508*0Sstevel@tonic-gate  *	    If Rules #1, #3 or #4 applies, a flush cache should be done
2509*0Sstevel@tonic-gate  *	    immediately before the first generated dma descriptor block of the
2510*0Sstevel@tonic-gate  *	    non-packet xfer.
2511*0Sstevel@tonic-gate  *	    If Rule #2 applies, a flush cache should be done immediately before
2512*0Sstevel@tonic-gate  *	    the last generated dma descriptor block of the non-packet xfer.
2513*0Sstevel@tonic-gate  *
2514*0Sstevel@tonic-gate  *    Note: The flush cache should be done at most once in each location that is
2515*0Sstevel@tonic-gate  *    required to be flushed no matter how many rules apply (i.e. only once
2516*0Sstevel@tonic-gate  *    before the first descriptor block and/or only once before the last
2517*0Sstevel@tonic-gate  *    descriptor block generated).  If more than one place requires a flush,
2518*0Sstevel@tonic-gate  *    then both flush operations must be performed.  This is determined by
2519*0Sstevel@tonic-gate  *    taking all rules that apply into account.
2520*0Sstevel@tonic-gate  *
2521*0Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2522*0Sstevel@tonic-gate  *    will contain the error code.
2523*0Sstevel@tonic-gate  */
2524*0Sstevel@tonic-gate static int
hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t * wvp)2525*0Sstevel@tonic-gate hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
2526*0Sstevel@tonic-gate {
2527*0Sstevel@tonic-gate 	uint32_t	dma_bound;
2528*0Sstevel@tonic-gate 
2529*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_enter,
2530*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2531*0Sstevel@tonic-gate 
2532*0Sstevel@tonic-gate 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
2533*0Sstevel@tonic-gate 	    NULL) {
2534*0Sstevel@tonic-gate 
2535*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2536*0Sstevel@tonic-gate 
2537*0Sstevel@tonic-gate 		TNF_PROBE_1(hci1394_flush_hci_cache_fail,
2538*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2539*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for flush_hci_cache");
2540*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2541*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2542*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2543*0Sstevel@tonic-gate 	}
2544*0Sstevel@tonic-gate 
2545*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2546*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2547*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2548*0Sstevel@tonic-gate }
2549*0Sstevel@tonic-gate 
2550*0Sstevel@tonic-gate /*
2551*0Sstevel@tonic-gate  * hci1394_alloc_storevalue_dma_mem()
2552*0Sstevel@tonic-gate  *    Allocate dma memory for a 1 hci component descriptor block
2553*0Sstevel@tonic-gate  *    which will be used as the dma memory location that ixl
2554*0Sstevel@tonic-gate  *    compiler generated storevalue descriptor commands will
2555*0Sstevel@tonic-gate  *    specify as location to store their data value.
2556*0Sstevel@tonic-gate  *
2557*0Sstevel@tonic-gate  *    Returns 32-bit bound address of allocated mem, or NULL.
2558*0Sstevel@tonic-gate  */
2559*0Sstevel@tonic-gate static uint32_t
hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t * wvp)2560*0Sstevel@tonic-gate hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
2561*0Sstevel@tonic-gate {
2562*0Sstevel@tonic-gate 	uint32_t	dma_bound;
2563*0Sstevel@tonic-gate 
2564*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_enter,
2565*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2566*0Sstevel@tonic-gate 
2567*0Sstevel@tonic-gate 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
2568*0Sstevel@tonic-gate 	    &dma_bound) == NULL) {
2569*0Sstevel@tonic-gate 
2570*0Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2571*0Sstevel@tonic-gate 
2572*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_bld_alloc_storevalue_dma_mem_alloc_fail,
2573*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2574*0Sstevel@tonic-gate 		    "IXL1394_EMEM_ALLOC_FAIL: for storevalue dma",
2575*0Sstevel@tonic-gate 		    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
2576*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2577*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2578*0Sstevel@tonic-gate 		return (NULL);
2579*0Sstevel@tonic-gate 	}
2580*0Sstevel@tonic-gate 
2581*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2582*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2583*0Sstevel@tonic-gate 
2584*0Sstevel@tonic-gate 	/* return bound address of allocated memory */
2585*0Sstevel@tonic-gate 	return (dma_bound);
2586*0Sstevel@tonic-gate }
2587*0Sstevel@tonic-gate 
2588*0Sstevel@tonic-gate 
2589*0Sstevel@tonic-gate /*
2590*0Sstevel@tonic-gate  * hci1394_alloc_xfer_ctl()
2591*0Sstevel@tonic-gate  *    Allocate an xfer_ctl structure.
2592*0Sstevel@tonic-gate  */
2593*0Sstevel@tonic-gate static hci1394_xfer_ctl_t *
hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t * wvp,uint32_t dmacnt)2594*0Sstevel@tonic-gate hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
2595*0Sstevel@tonic-gate {
2596*0Sstevel@tonic-gate 	hci1394_xfer_ctl_t *xcsp;
2597*0Sstevel@tonic-gate 
2598*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_enter,
2599*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2600*0Sstevel@tonic-gate 
2601*0Sstevel@tonic-gate 	/*
2602*0Sstevel@tonic-gate 	 * allocate an xfer_ctl struct which includes dmacnt of
2603*0Sstevel@tonic-gate 	 * xfer_ctl_dma structs
2604*0Sstevel@tonic-gate 	 */
2605*0Sstevel@tonic-gate #ifdef _KERNEL
2606*0Sstevel@tonic-gate 	if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
2607*0Sstevel@tonic-gate 	    (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2608*0Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
2609*0Sstevel@tonic-gate 
2610*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2611*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2612*0Sstevel@tonic-gate 		return (NULL);
2613*0Sstevel@tonic-gate 	}
2614*0Sstevel@tonic-gate #else
2615*0Sstevel@tonic-gate 	/*
2616*0Sstevel@tonic-gate 	 * This section makes it possible to easily run and test the compiler in
2617*0Sstevel@tonic-gate 	 * user mode.
2618*0Sstevel@tonic-gate 	 */
2619*0Sstevel@tonic-gate 	if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
2620*0Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2621*0Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
2622*0Sstevel@tonic-gate 
2623*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2624*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2625*0Sstevel@tonic-gate 		return (NULL);
2626*0Sstevel@tonic-gate 	}
2627*0Sstevel@tonic-gate #endif
2628*0Sstevel@tonic-gate 	/*
2629*0Sstevel@tonic-gate 	 * set dma structure count into allocated xfer_ctl struct for
2630*0Sstevel@tonic-gate 	 * later deletion.
2631*0Sstevel@tonic-gate 	 */
2632*0Sstevel@tonic-gate 	xcsp->cnt = dmacnt;
2633*0Sstevel@tonic-gate 
2634*0Sstevel@tonic-gate 	/* link it to previously allocated xfer_ctl structs or set as first */
2635*0Sstevel@tonic-gate 	if (wvp->xcs_firstp == NULL) {
2636*0Sstevel@tonic-gate 		wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
2637*0Sstevel@tonic-gate 	} else {
2638*0Sstevel@tonic-gate 		wvp->xcs_currentp->ctl_nextp = xcsp;
2639*0Sstevel@tonic-gate 		wvp->xcs_currentp = xcsp;
2640*0Sstevel@tonic-gate 	}
2641*0Sstevel@tonic-gate 
2642*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2643*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2644*0Sstevel@tonic-gate 
2645*0Sstevel@tonic-gate 	/* return allocated xfer_ctl structure */
2646*0Sstevel@tonic-gate 	return (xcsp);
2647*0Sstevel@tonic-gate }
2648*0Sstevel@tonic-gate 
/*
 * hci1394_alloc_dma_mem()
 *	Allocates and binds memory for openHCI DMA descriptors as needed.
 *
 *	Carves "size" bytes out of the current dma descriptor memory block,
 *	allocating (or advancing to the next dma cookie of) a backing buffer
 *	when the current one cannot satisfy the request.
 *
 *	On success, returns the kernel virtual address of the carved region
 *	and stores its 32-bit bound dma bus address through dma_bound.
 *	Returns NULL on allocation failure (dma_bound is left untouched).
 *
 *	NOTE(review): no alignment is applied here beyond the 16-byte
 *	alignment of each freshly allocated buffer; callers presumably only
 *	request multiples of the descriptor size -- confirm.
 */
static void *
hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
    uint32_t *dma_bound)
{
	hci1394_idma_desc_mem_t *dma_new;
	hci1394_buf_parms_t parms;
	hci1394_buf_info_t *memp;
	void	*dma_mem_ret;
	int	ret;

	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/*
	 * if no dma has been allocated or current request exceeds
	 * remaining memory
	 */
	if ((wvp->dma_currentp == NULL) ||
	    (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
		wvp->dma_currentp->used))) {
#ifdef _KERNEL
		/* kernel-mode memory allocation for driver */

		/* allocate struct to track more dma descriptor memory */
		if ((dma_new = (hci1394_idma_desc_mem_t *)
		    kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
		    KM_NOSLEEP)) == NULL) {

			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (NULL);
		}

		/*
		 * if more cookies available from the current mem, try to find
		 * one of suitable size. Cookies that are too small will be
		 * skipped and unused. Given that cookie size is always at least
		 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
		 * it's a small price to pay for code simplicity.
		 */
		if (wvp->dma_currentp != NULL) {
			/* new struct is derived from current */
			memp = &wvp->dma_currentp->mem;
			dma_new->mem = *memp;
			dma_new->offset = wvp->dma_currentp->offset +
			    memp->bi_cookie.dmac_size;

			/*
			 * walk the remaining cookies; offset accumulates the
			 * sizes of all skipped (too small) cookies
			 */
			for (; memp->bi_cookie_count > 1;
			    memp->bi_cookie_count--) {
				ddi_dma_nextcookie(memp->bi_dma_handle,
				    &dma_new->mem.bi_cookie);

				if (dma_new->mem.bi_cookie.dmac_size >= size) {
					/*
					 * suitable cookie found: transfer
					 * ownership of the buffer handle to
					 * the new tracking struct (so the
					 * buffer is freed exactly once)
					 */
					dma_new->mem_handle =
					    wvp->dma_currentp->mem_handle;
					wvp->dma_currentp->mem_handle = NULL;
					dma_new->mem.bi_cookie_count--;
					break;
				}
				dma_new->offset +=
				    dma_new->mem.bi_cookie.dmac_size;
			}
		}

		/* if no luck with current buffer, allocate a new one */
		if (dma_new->mem_handle == NULL) {
			parms.bp_length = HCI1394_IXL_PAGESIZE;
			parms.bp_max_cookies = OHCI_MAX_COOKIE;
			parms.bp_alignment = 16;
			ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
			    &parms, &dma_new->mem, &dma_new->mem_handle);
			if (ret != DDI_SUCCESS) {
				kmem_free(dma_new,
				    sizeof (hci1394_idma_desc_mem_t));

				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
				    HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (NULL);
			}

			/* paranoia: this is not supposed to happen */
			if (dma_new->mem.bi_cookie.dmac_size < size) {
				hci1394_buf_free(&dma_new->mem_handle);
				kmem_free(dma_new,
				    sizeof (hci1394_idma_desc_mem_t));

				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
				    HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (NULL);
			}
			dma_new->offset = 0;
		}
#else
		/* user-mode memory allocation for user mode compiler tests */
		/* allocate another dma_desc_mem struct */
		if ((dma_new = (hci1394_idma_desc_mem_t *)
			calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (NULL);
		}
		dma_new->mem.bi_dma_handle = NULL;
		dma_new->mem.bi_handle = NULL;
		if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
			    HCI1394_IXL_PAGESIZE)) == NULL) {
			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (NULL);
		}
		/* in user mode the "bus" address is just the heap address */
		dma_new->mem.bi_cookie.dmac_address =
		    (unsigned long)dma_new->mem.bi_kaddr;
		dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
		dma_new->mem.bi_cookie_count = 1;
#endif

		/* if this is not first dma_desc_mem, link last one to it */
		if (wvp->dma_currentp != NULL) {
			wvp->dma_currentp->dma_nextp = dma_new;
			wvp->dma_currentp = dma_new;
		} else {
			/* else set it as first one */
			wvp->dma_currentp = wvp->dma_firstp = dma_new;
		}
	}

	/* now allocate requested memory from current block */
	dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
	    wvp->dma_currentp->offset + wvp->dma_currentp->used;
	*dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
	    wvp->dma_currentp->used;
	wvp->dma_currentp->used += size;

	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (dma_mem_ret);
}
2789*0Sstevel@tonic-gate 
2790*0Sstevel@tonic-gate 
2791*0Sstevel@tonic-gate /*
2792*0Sstevel@tonic-gate  * hci1394_is_opcode_valid()
2793*0Sstevel@tonic-gate  *    given an ixl opcode, this routine returns B_TRUE if it is a
2794*0Sstevel@tonic-gate  *    recognized opcode and B_FALSE if it is not recognized.
2795*0Sstevel@tonic-gate  *    Note that the FULL 16 bits of the opcode are checked which includes
2796*0Sstevel@tonic-gate  *    various flags and not just the low order 8 bits of unique code.
2797*0Sstevel@tonic-gate  */
2798*0Sstevel@tonic-gate static boolean_t
hci1394_is_opcode_valid(uint16_t ixlopcode)2799*0Sstevel@tonic-gate hci1394_is_opcode_valid(uint16_t ixlopcode)
2800*0Sstevel@tonic-gate {
2801*0Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_is_opcode_bad_enter,
2802*0Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2803*0Sstevel@tonic-gate 
2804*0Sstevel@tonic-gate 	/* if it's not one we know about, then it's bad */
2805*0Sstevel@tonic-gate 	switch (ixlopcode) {
2806*0Sstevel@tonic-gate 	case IXL1394_OP_LABEL:
2807*0Sstevel@tonic-gate 	case IXL1394_OP_JUMP:
2808*0Sstevel@tonic-gate 	case IXL1394_OP_CALLBACK:
2809*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT:
2810*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_ST:
2811*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_BUF:
2812*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT:
2813*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_ST:
2814*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_WHDR_ST:
2815*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_BUF:
2816*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_HDR_ONLY:
2817*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_NO_PKT:
2818*0Sstevel@tonic-gate 	case IXL1394_OP_STORE_TIMESTAMP:
2819*0Sstevel@tonic-gate 	case IXL1394_OP_SET_TAGSYNC:
2820*0Sstevel@tonic-gate 	case IXL1394_OP_SET_SKIPMODE:
2821*0Sstevel@tonic-gate 	case IXL1394_OP_SET_SYNCWAIT:
2822*0Sstevel@tonic-gate 	case IXL1394_OP_JUMP_U:
2823*0Sstevel@tonic-gate 	case IXL1394_OP_CALLBACK_U:
2824*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_U:
2825*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_ST_U:
2826*0Sstevel@tonic-gate 	case IXL1394_OP_RECV_BUF_U:
2827*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_U:
2828*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_ST_U:
2829*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
2830*0Sstevel@tonic-gate 	case IXL1394_OP_SEND_BUF_U:
2831*0Sstevel@tonic-gate 	case IXL1394_OP_SET_TAGSYNC_U:
2832*0Sstevel@tonic-gate 	case IXL1394_OP_SET_SKIPMODE_U:
2833*0Sstevel@tonic-gate 		TNF_PROBE_1_DEBUG(hci1394_is_opcode_valid_enter,
2834*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2835*0Sstevel@tonic-gate 		    "ixl opcode is valid");
2836*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_bad_enter,
2837*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2838*0Sstevel@tonic-gate 		return (B_TRUE);
2839*0Sstevel@tonic-gate 	default:
2840*0Sstevel@tonic-gate 		TNF_PROBE_2(hci1394_is_opcode_valid_enter,
2841*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2842*0Sstevel@tonic-gate 		    "ixl opcode is NOT valid", tnf_opaque, ixl_opcode,
2843*0Sstevel@tonic-gate 		    ixlopcode);
2844*0Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_enter,
2845*0Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2846*0Sstevel@tonic-gate 		return (B_FALSE);
2847*0Sstevel@tonic-gate 	}
2848*0Sstevel@tonic-gate }
2849