xref: /netbsd-src/sys/dev/ic/aic79xx_inline.h (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: aic79xx_inline.h,v 1.2 2003/05/03 18:11:13 wiz Exp $	*/
2 
3 /*
4  * Inline routines shareable across OS platforms.
5  *
6  * Copyright (c) 1994-2001 Justin T. Gibbs.
7  * Copyright (c) 2000-2003 Adaptec Inc.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification.
16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17  *    substantially similar to the "NO WARRANTY" disclaimer below
18  *    ("Disclaimer") and any redistribution must be conditioned upon
19  *    including a substantially similar Disclaimer requirement for further
20  *    binary redistribution.
21  * 3. Neither the names of the above-listed copyright holders nor the names
22  *    of any contributors may be used to endorse or promote products derived
23  *    from this software without specific prior written permission.
24  *
25  * Alternatively, this software may be distributed under the terms of the
26  * GNU General Public License ("GPL") version 2 as published by the Free
27  * Software Foundation.
28  *
29  * NO WARRANTY
30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
33  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
39  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40  * POSSIBILITY OF SUCH DAMAGES.
41  *
42  * //depot/aic7xxx/aic7xxx/aic79xx_inline.h#44 $
43  *
44  * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_inline.h,v 1.8 2003/03/06 23:58:34 gibbs Exp $
45  */
46 /*
47  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
48  */
49 
50 #ifndef _AIC79XX_INLINE_H_
51 #define _AIC79XX_INLINE_H_
52 
/******************************** Debugging ***********************************/
static __inline char *ahd_name(struct ahd_softc *ahd);

/*
 * Return the human-readable device name for this controller instance,
 * primarily for use in diagnostic printfs.
 */
static __inline char *
ahd_name(struct ahd_softc *ahd)
{
	return (ahd->name);
}
61 
/************************ Sequencer Execution Control *************************/
/*
 * Forward declarations for the sequencer mode bookkeeping and
 * pause/unpause helpers defined below.
 */
static __inline void ahd_known_modes(struct ahd_softc *ahd,
				     ahd_mode src, ahd_mode dst);
static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
						    ahd_mode src,
						    ahd_mode dst);
static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
					    ahd_mode_state state,
					    ahd_mode *src, ahd_mode *dst);
static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
				   ahd_mode dst);
static __inline void ahd_update_modes(struct ahd_softc *ahd);
static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
				      ahd_mode dstmode, const char *file,
				      int line);
static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
static __inline void ahd_restore_modes(struct ahd_softc *ahd,
				       ahd_mode_state state);
static __inline int  ahd_is_paused(struct ahd_softc *ahd);
static __inline void ahd_pause(struct ahd_softc *ahd);
static __inline void ahd_unpause(struct ahd_softc *ahd);
83 
/*
 * Record that the current (and saved) sequencer source/destination
 * modes are known to be src/dst, without touching the hardware.
 */
static __inline void
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	ahd->src_mode = src;
	ahd->dst_mode = dst;
	ahd->saved_src_mode = src;
	ahd->saved_dst_mode = dst;
}
92 
/*
 * Pack a src/dst mode pair into the single-value format used by the
 * MODE_PTR register and by ahd_save_modes()/ahd_restore_modes().
 */
static __inline ahd_mode_state
ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
}
98 
/*
 * Inverse of ahd_build_mode_state(): unpack a mode-state value into
 * its source and destination mode components.
 */
static __inline void
ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
		       ahd_mode *src, ahd_mode *dst)
{
	*src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
}
106 
/*
 * Make src/dst the active sequencer modes, writing MODE_PTR only when
 * the requested pair differs from the cached one.
 */
static __inline void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	/* Skip the register write if the modes are already current. */
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n");
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	/* Keep the software cache in sync with the hardware register. */
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}
124 
/*
 * Refresh the cached mode state by reading MODE_PTR back from the
 * chip; used when the in-core modes are unknown.
 */
static __inline void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("Reading mode 0x%x\n", mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	ahd_known_modes(ahd, src, dst);
}
140 
/*
 * Debug-build sanity check: panic if the cached modes are not within
 * the caller-supplied mode masks.  Compiles to a no-op without
 * AHD_DEBUG.  file/line identify the call site (see AHD_ASSERT_MODES).
 */
static __inline void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n",
		       ahd_name(ahd), file, line);
	}
#endif
}
153 
/*
 * Capture the current mode pair for a later ahd_restore_modes().
 * If the cached modes are unknown, fetch them from the chip first.
 */
static __inline ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}
163 
164 static __inline void
165 ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
166 {
167 	ahd_mode src;
168 	ahd_mode dst;
169 
170 	ahd_extract_mode_state(ahd, state, &src, &dst);
171 	ahd_set_modes(ahd, src, dst);
172 }
173 
/*
 * Assert the current sequencer modes, recording the call site for the
 * panic message.  The expansion deliberately omits a trailing
 * semicolon: callers supply their own (as every call site in this file
 * already does), which keeps the macro safe inside unbraced if/else
 * bodies and avoids empty-statement double semicolons.
 */
#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__)
176 
/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahd_is_paused(struct ahd_softc *ahd)
{
	/* PAUSE in HCNTRL is set by the chip once it has actually halted. */
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}
186 
/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}
206 
/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahd_unpause(struct ahd_softc *ahd)
{
	/*
	 * Automatically restore our modes to those saved
	 * prior to the first change of the mode.
	 */
	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
			ahd_reset_cmds_pending(ahd);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	}

	/* Only release the sequencer if no pausing interrupt is pending. */
	if ((ahd_inb(ahd, INTSTAT) & ~(SWTMINT | CMDCMPLT)) == 0)
		ahd_outb(ahd, HCNTRL, ahd->unpause);

	/* Once running, the sequencer may change modes behind our back. */
	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
}
236 
/*********************** Scatter Gather List Handling *************************/
/* Forward declarations for SG list and SCB setup helpers defined below. */
static __inline void	*ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
				      void *sgptr, bus_addr_t addr,
				      bus_size_t len, int last);
static __inline void	 ahd_setup_scb_common(struct ahd_softc *ahd,
					      struct scb *scb);
static __inline void	 ahd_setup_data_scb(struct ahd_softc *ahd,
					    struct scb *scb);
static __inline void	 ahd_setup_noxfer_scb(struct ahd_softc *ahd,
					      struct scb *scb);
247 
/*
 * Fill in the next scatter/gather descriptor at sgptr and return a
 * pointer to the descriptor following it.  The descriptor layout
 * depends on whether 64-bit addressing is enabled for this adapter.
 */
static __inline void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, bus_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(bus_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		/*
		 * 32-bit descriptors carry address bits 32-38 in the high
		 * byte of the length word (39-bit addressing).
		 */
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}
271 
/*
 * Perform hardware SCB setup shared by data and non-data commands:
 * task attribute, the Rev A short-lun workaround, and the in-line
 * sense buffer address when the CDB layout permits it.
 */
static __inline void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute= scb->hscb->control & SCB_TAG_TYPE;
		/*
		 * For Rev A short lun workaround.
		 */
		memset(scb->hscb->pkt_long_lun, 0, sizeof(scb->hscb->pkt_long_lun));
		scb->hscb->pkt_long_lun[6] = scb->hscb->lun;
	}

	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}
292 
/*
 * Prime the hardware SCB of a data-bearing command with its first
 * SG element and the bus address of its SG list.
 */
static __inline void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			/* Bits 32-38 ride in the top byte of sg->len. */
			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}
326 
/*
 * Mark an SCB as carrying no data transfer: null SG list and zeroed
 * data pointer/count.
 */
static __inline void
ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
{
	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
	scb->hscb->dataptr = 0;
	scb->hscb->datacnt = 0;
}
334 
/************************** Memory mapping routines ***************************/
/* Forward declarations for SG/SCB address translation and DMA sync helpers. */
static __inline size_t	ahd_sg_size(struct ahd_softc *ahd);
static __inline void *
			ahd_sg_bus_to_virt(struct ahd_softc *ahd,
					   struct scb *scb,
					   uint32_t sg_busaddr);
static __inline uint32_t
			ahd_sg_virt_to_bus(struct ahd_softc *ahd,
					   struct scb *scb,
					   void *sg);
static __inline void	ahd_sync_scb(struct ahd_softc *ahd,
				     struct scb *scb, int op);
static __inline void	ahd_sync_sglist(struct ahd_softc *ahd,
					struct scb *scb, int op);
static __inline void	ahd_sync_sense(struct ahd_softc *ahd,
				       struct scb *scb, int op);
static __inline uint32_t
			ahd_targetcmd_offset(struct ahd_softc *ahd,
					     u_int index);
354 
355 static __inline size_t
356 ahd_sg_size(struct ahd_softc *ahd)
357 {
358 	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
359 		return (sizeof(struct ahd_dma64_seg));
360 	return (sizeof(struct ahd_dma_seg));
361 }
362 
/*
 * Translate a bus address within an SCB's SG list back to the
 * corresponding kernel-virtual descriptor pointer.
 */
static __inline void *
ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
	return ((uint8_t *)scb->sg_list + sg_offset);
}
372 
/*
 * Translate a kernel-virtual SG descriptor pointer to the bus address
 * the sequencer uses for the same descriptor.
 */
static __inline uint32_t
ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
{
	bus_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
		  - ahd_sg_size(ahd);

	return (scb->sg_list_busaddr + sg_offset);
}
384 
/*
 * DMA-sync the hardware SCB of the given scb for the requested
 * bus_dmamap_sync operation(s).
 */
static __inline void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}
392 
/*
 * DMA-sync the scb's scatter/gather list.  A no-op for commands that
 * transfer no data.
 */
static __inline void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	/* sg_list_busaddr points at entry 1; back up by one descriptor. */
	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}
403 
/*
 * DMA-sync the scb's sense data buffer.
 */
static __inline void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}
412 
/*
 * Byte offset of target command slot 'index' within the shared data
 * area, measured from the start of the qoutfifo.
 */
static __inline uint32_t
ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
{
	return (((uint8_t *)&ahd->targetcmds[index])
	       - (uint8_t *)ahd->qoutfifo);
}
419 
/*********************** Miscellaneous Support Functions **********************/
/*
 * Forward declarations for SCB completion, multi-byte register access,
 * queue-offset accessors, SCB RAM readers and queueing helpers.
 */
static __inline void	ahd_complete_scb(struct ahd_softc *ahd,
					 struct scb *scb);
static __inline void	ahd_update_residual(struct ahd_softc *ahd,
					    struct scb *scb);
static __inline struct ahd_initiator_tinfo *
			ahd_fetch_transinfo(struct ahd_softc *ahd,
					    char channel, u_int our_id,
					    u_int remote_id,
					    struct ahd_tmode_tstate **tstate);
static __inline uint16_t
			ahd_inw(struct ahd_softc *ahd, u_int port);
static __inline void	ahd_outw(struct ahd_softc *ahd, u_int port,
				 u_int value);
static __inline uint32_t
			ahd_inl(struct ahd_softc *ahd, u_int port);
static __inline void	ahd_outl(struct ahd_softc *ahd, u_int port,
				 uint32_t value);
static __inline uint64_t
			ahd_inq(struct ahd_softc *ahd, u_int port);
static __inline void	ahd_outq(struct ahd_softc *ahd, u_int port,
				 uint64_t value);
static __inline u_int	ahd_get_scbptr(struct ahd_softc *ahd);
static __inline void	ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
static __inline u_int	ahd_get_hnscb_qoff(struct ahd_softc *ahd);
static __inline void	ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
static __inline u_int	ahd_get_hescb_qoff(struct ahd_softc *ahd);
static __inline void	ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
static __inline u_int	ahd_get_snscb_qoff(struct ahd_softc *ahd);
static __inline void	ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
static __inline u_int	ahd_get_sescb_qoff(struct ahd_softc *ahd);
static __inline void	ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
static __inline u_int	ahd_get_sdscb_qoff(struct ahd_softc *ahd);
static __inline void	ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
static __inline u_int	ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
static __inline u_int	ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
static __inline uint32_t
			ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
static __inline void	ahd_swap_with_next_hscb(struct ahd_softc *ahd,
						struct scb *scb);
static __inline void	ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
static __inline uint8_t *
			ahd_get_sense_buf(struct ahd_softc *ahd,
					  struct scb *scb);
static __inline uint32_t
			ahd_get_sense_bufaddr(struct ahd_softc *ahd,
					      struct scb *scb);
static __inline void	ahd_post_scb(struct ahd_softc *ahd,
					 struct scb *scb);


470 
/*
 * Post-process a completed SCB.  The original body was a byte-for-byte
 * duplicate of ahd_complete_scb() (including a stray mix of space/tab
 * indentation); delegate to it so the completion logic lives in one
 * place.  ahd_complete_scb() is prototyped above, so the forward call
 * is well-formed.
 */
static __inline void
ahd_post_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_complete_scb(ahd, scb);
}
482 
/*
 * Complete an SCB: route it through status handling if the sequencer
 * flagged SG_STATUS_VALID, otherwise finish it directly.
 */
static __inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}
494 
/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}
508 
/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
528 
/*
 * Copy the collision-index fields (scsiid/lun) from one SCB to
 * another.  Arguments are parenthesized so the macro remains correct
 * if a caller ever passes a non-primary expression.
 */
#define AHD_COPY_COL_IDX(dst, src)				\
do {								\
	(dst)->hscb->scsiid = (src)->hscb->scsiid;		\
	(dst)->hscb->lun = (src)->hscb->lun;			\
} while (0)
534 
/*
 * Read/write a 16-bit little-endian value via two byte-wide register
 * accesses.
 */
static __inline uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
}

static __inline void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}
547 
548 static __inline uint32_t
549 ahd_inl(struct ahd_softc *ahd, u_int port)
550 {
551 	return ((ahd_inb(ahd, port))
552 	      | (ahd_inb(ahd, port+1) << 8)
553 	      | (ahd_inb(ahd, port+2) << 16)
554 	      | (ahd_inb(ahd, port+3) << 24));
555 }
556 
557 static __inline void
558 ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
559 {
560 	ahd_outb(ahd, port, (value) & 0xFF);
561 	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
562 	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
563 	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
564 }
565 
566 static __inline uint64_t
567 ahd_inq(struct ahd_softc *ahd, u_int port)
568 {
569 	return ((ahd_inb(ahd, port))
570 	      | (ahd_inb(ahd, port+1) << 8)
571 	      | (ahd_inb(ahd, port+2) << 16)
572 	      | (ahd_inb(ahd, port+3) << 24)
573 	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
574 	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
575 	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
576 	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
577 }
578 
/*
 * Write a 64-bit value as eight byte-wide register accesses, least
 * significant byte first.
 */
static __inline void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}
591 
/*
 * Read/write the 16-bit SCBPTR register.  Valid in any mode except
 * UNKNOWN and CFG, which the debug-build assertion enforces.
 */
static __inline u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}

static __inline void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}
608 
/*
 * Accessors for the host new/empty SCB queue offset registers.
 * HNSCB_QOFF uses the atomic 16-bit access wrappers.
 */
static __inline u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}

static __inline void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}

static __inline u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}

static __inline void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}
632 
/*
 * Accessors for the sequencer-side queue offset registers.  All of
 * them require the CCHAN mode to be active (asserted in debug builds).
 */
static __inline u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	/*
	 * Write the value back after reading it; presumably this
	 * read-then-rewrite latches the register on the chip — TODO
	 * confirm against the chip documentation.
	 */
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}

static __inline void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}

static __inline u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}

static __inline void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}

static __inline u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}

static __inline void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}
679 
/*
 * Read one byte of SCB RAM, applying the PCI-X Rev A discard-timer
 * workaround described below.
 */
static __inline u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The work around is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}
704 
/*
 * 16- and 32-bit little-endian reads of SCB RAM, built on the
 * workaround-aware byte reader above.
 */
static __inline u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}

static __inline uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8)
	      | (ahd_inb_scbram(ahd, offset+2) << 16)
	      | (ahd_inb_scbram(ahd, offset+3) << 24));
}
720 
/*
 * Map an SCB tag to its software SCB, or NULL if the tag is out of
 * range.  A found SCB is post-synced so the host sees current data.
 */
static __inline struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}
734 
/*
 * Exchange the scb's hardware SCB with the pre-advertised
 * "next queued" HSCB so the sequencer downloads the HSCB it
 * was already told about.  See the long comment below.
 */
static __inline void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	/* Restore the bus address clobbered by the memcpy above. */
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}
766 
/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_swap_with_next_hscb(ahd, scb);

	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
		panic("Attempt to queue invalid SCB tag %x\n",
		      SCB_GET_TAG(scb));

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;

	/* Prime the HSCB's data-transfer fields before handing it over. */
	if (scb->sg_count != 0)
		ahd_setup_data_scb(ahd, scb);
	else
		ahd_setup_noxfer_scb(ahd, scb);
	ahd_setup_scb_common(ahd, scb);

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
		       ahd_name(ahd),
		       SCB_GET_TAG(scb), scb->hscb->hscb_busaddr,
		       (u_int)((scb->hscb->dataptr >> 32) & 0xFFFFFFFF),
		       (u_int)(scb->hscb->dataptr & 0xFFFFFFFF),
		       scb->hscb->datacnt);
	}
#endif
	/* Tell the adapter about the newly queued SCB */
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
}
810 
/*
 * Accessors for an SCB's sense buffer: kernel-virtual pointer and
 * bus address, respectively.
 */
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_data);
}

static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_busaddr);
}
822 
/************************** Interrupt Processing ******************************/
/* Forward declarations for the interrupt-side helpers defined below. */
static __inline void	ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
static __inline void	ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
static __inline u_int	ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
static __inline int	ahd_intr(void *arg);
static __inline void	ahd_minphys(struct buf *bp);
829 
/*
 * DMA-sync the entire qoutfifo (one 16-bit entry per possible SCB).
 */
static __inline void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, ahd->shared_data_dmamap,
			/*offset*/0, /*len*/AHD_SCB_MAX * sizeof(uint16_t), op);
}
836 
/*
 * DMA-sync the target-mode command fifo.  A no-op unless target-mode
 * support is compiled in and the target role is enabled.
 */
static __inline void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}
850 
851 /*
852  * See if the firmware has posted any completed commands
853  * into our in-core command complete fifos.
854  */
855 #define AHD_RUN_QOUTFIFO 0x1
856 #define AHD_RUN_TQINFIFO 0x2
857 static __inline u_int
858 ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
859 {
860 	u_int retval;
861 
862 	retval = 0;
863 	ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/, ahd->shared_data_dmamap,
864 			/*offset*/ahd->qoutfifonext, /*len*/2,
865 			BUS_DMASYNC_POSTREAD);
866 	if ((ahd->qoutfifo[ahd->qoutfifonext]
867 	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag)
868 		retval |= AHD_RUN_QOUTFIFO;
869 #ifdef AHD_TARGET_MODE
870 	if ((ahd->flags & AHD_TARGETROLE) != 0
871 	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
872 		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
873 				ahd->shared_data_dmamap,
874 				ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
875 				/*len*/sizeof(struct target_cmd),
876 				BUS_DMASYNC_POSTREAD);
877 		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
878 			retval |= AHD_RUN_TQINFIFO;
879 	}
880 #endif
881 	return (retval);
882 }
883 
/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahd_intr(void *arg)
{
	struct ahd_softc *ahd = (struct ahd_softc*)arg;
	u_int	intstat;

	if ((ahd->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return 0;
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	    && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		/* Hold off new requests while draining the completions. */
		scsipi_channel_freeze(&ahd->sc_channel, 1);
		ahd_run_qoutfifo(ahd);
		scsipi_channel_thaw(&ahd->sc_channel, 1);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
		/* Command completion was the only pending source. */
		if (intstat == CMDCMPLT)
			return 1;
	}

	/* An all-ones read usually means the card is gone. */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0)
		/* Hot eject */
		return 1;

	if ((intstat & INT_PEND) == 0)
		return 1;

	if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
		return 1;
	}

	if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
		return 1;
	}

	if ((intstat & (SEQINT)) != 0) {
		ahd_handle_seqint(ahd, intstat);
		return 1;
	}

	if ((intstat & SCSIINT) != 0) {
		ahd_handle_scsiint(ahd, intstat);
		return 1;
	}

	return 1;
}
981 
982 static __inline void
983 ahd_minphys(bp)
984         struct buf *bp;
985 {
986 /*
987  * Even though the card can transfer up to 16megs per command
988  * we are limited by the number of segments in the DMA segment
989  * list that we can hold.  The worst case is that all pages are
990  * discontinuous physically, hense the "page per segment" limit
991  * enforced here.
992  */
993         if (bp->b_bcount > AHD_MAXTRANSFER_SIZE) {
994                 bp->b_bcount = AHD_MAXTRANSFER_SIZE;
995         }
996         minphys(bp);
997 }
998 
static __inline u_int32_t scsi_4btoul(u_int8_t *bytes);

/*
 * Decode a 4-byte big-endian field (as found in SCSI CDBs and mode
 * pages) into a host-order 32-bit value.  bytes[0] is widened to
 * u_int32_t before shifting: shifting a promoted signed int left by
 * 24 when bit 7 is set (bytes[0] >= 0x80) is signed-overflow
 * undefined behavior in C.
 */
static __inline u_int32_t
scsi_4btoul(u_int8_t *bytes)
{
	u_int32_t rv;

	rv = ((u_int32_t)bytes[0] << 24) |
	     ((u_int32_t)bytes[1] << 16) |
	     ((u_int32_t)bytes[2] << 8) |
	     bytes[3];
	return (rv);
}
1012 
1013 
1014 #endif  /* _AIC79XX_INLINE_H_ */
1015