/*	$NetBSD: aic7xxx_inline.h,v 1.14 2009/03/15 15:52:12 cegger Exp $	*/

/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#39 $
 *
 * $FreeBSD: /repoman/r/ncvs/src/sys/dev/aic7xxx/aic7xxx_inline.h,v 1.20 2003/01/20 20:44:55 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#ifndef _AIC7XXX_INLINE_H_
#define _AIC7XXX_INLINE_H_

/************************* Sequencer Execution Control ************************/
static __inline void	ahc_pause_bug_fix(struct ahc_softc *ahc);
static __inline int	ahc_is_paused(struct ahc_softc *ahc);
static __inline void	ahc_pause(struct ahc_softc *ahc);
static __inline void	ahc_unpause(struct ahc_softc *ahc);

/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused by an interrupt or
 * a manual pause while it is accessing SCB RAM, accesses to certain
 * registers will hang the system (infinite PCI retries).
 */
static __inline void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}
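
/*
 * Illustrative usage (a sketch, not code from this file): driver code
 * that must touch state the sequencer also manipulates brackets the
 * access with a pause/unpause pair, e.g.:
 *
 *	ahc_pause(ahc);
 *	... read or modify sequencer-visible registers ...
 *	ahc_unpause(ahc);
 *
 * Because ahc_pause() spins until the sequencer acknowledges the
 * pause, the bracketed accesses cannot race sequencer execution.
 */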

/*********************** Untagged Transaction Routines ************************/
static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target/LUN pair.
 */
static __inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target/LUN
 * pair to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static __inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}
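
/*
 * Example (illustrative): because untagged_queue_lock is a counting
 * semaphore, freeze/release pairs may nest; the untagged queues only
 * run when the outermost release drops the count back to zero:
 *
 *	ahc_freeze_untagged_queues(ahc);	count 0 -> 1
 *	ahc_freeze_untagged_queues(ahc);	count 1 -> 2
 *	ahc_release_untagged_queues(ahc);	count 2 -> 1, queues held
 *	ahc_release_untagged_queues(ahc);	count 1 -> 0, queues run
 */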

/************************** Memory mapping routines ***************************/
static __inline struct ahc_dma_seg *
			ahc_sg_bus_to_virt(struct scb *scb,
					   uint32_t sg_busaddr);
static __inline uint32_t
			ahc_sg_virt_to_bus(struct scb *scb,
					   struct ahc_dma_seg *sg);
static __inline uint32_t
			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
static __inline void	ahc_sync_scb(struct ahc_softc *ahc,
				     struct scb *scb, int op);
static __inline void	ahc_sync_sglist(struct ahc_softc *ahc,
					struct scb *scb, int op);
static __inline uint32_t
			ahc_targetcmd_offset(struct ahc_softc *ahc,
					     u_int index);

static __inline struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

static __inline uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}
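
/*
 * The two conversions above are inverses of one another; for any
 * segment past entry 0 of an scb's list (a sketch of the invariant):
 *
 *	sg == ahc_sg_bus_to_virt(scb, ahc_sg_virt_to_bus(scb, sg));
 *
 * Both account for sg_list_phys referring to entry 1 rather than
 * entry 0 of the in-core segment array.
 */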

static __inline uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

static __inline void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	ahc_dmamap_sync(ahc, ahc->parent_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->scb_data->hscbs)
			    * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->parent_dmat, scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
			    * sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

static __inline uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}

/******************************** Debugging ***********************************/
static __inline const char *ahc_name(struct ahc_softc *ahc);

static __inline const char *
ahc_name(struct ahc_softc *ahc)
{
	return (ahc->name);
}

/*********************** Miscellaneous Support Functions ***********************/

static __inline void	ahc_update_residual(struct ahc_softc *ahc,
					    struct scb *scb);
static __inline struct ahc_initiator_tinfo *
			ahc_fetch_transinfo(struct ahc_softc *ahc,
					    char channel, u_int our_id,
					    u_int remote_id,
					    struct ahc_tmode_tstate **tstate);
static __inline uint16_t
			ahc_inw(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outw(struct ahc_softc *ahc, u_int port,
				 u_int value);
static __inline uint32_t
			ahc_inl(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outl(struct ahc_softc *ahc, u_int port,
				 uint32_t value);
static __inline uint64_t
			ahc_inq(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outq(struct ahc_softc *ahc, u_int port,
				 uint64_t value);
static __inline struct scb*
			ahc_get_scb(struct ahc_softc *ahc);
static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
						struct scb *scb);
static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scsi_sense_data *
			ahc_get_sense_buf(struct ahc_softc *ahc,
					  struct scb *scb);
static __inline uint32_t
			ahc_get_sense_bufaddr(struct ahc_softc *ahc,
					      struct scb *scb);

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahc_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
#ifdef notdef
	if (channel == 'B')
		our_id += 8;
#endif
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
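
/*
 * Typical call (illustrative; the declarations below are placeholders
 * for whatever the caller has in scope):
 *
 *	struct ahc_tmode_tstate *tstate;
 *	struct ahc_initiator_tinfo *tinfo;
 *
 *	tinfo = ahc_fetch_transinfo(ahc, ahc->channel, ahc->our_id,
 *				    target_id, &tstate);
 *
 * tinfo then holds the negotiation parameters for the connection to
 * target_id.
 */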

static __inline uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
}

static __inline void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

static __inline void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, (value) & 0xFF);
	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}

static __inline uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

static __inline void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}
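
/*
 * The helpers above synthesize wide register accesses from the
 * byte-wide ahc_inb()/ahc_outb() primitives, least significant byte
 * first to match the chip's register layout.  For example (a sketch;
 * HADDR names the 32-bit host address window in the aic7xxx register
 * set):
 *
 *	ahc_outl(ahc, HADDR, busaddr);
 *
 * expands to four ahc_outb() calls covering HADDR through HADDR+3.
 */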

/*
 * Get a free scb.  Returns NULL if the free list is empty.
 */
static __inline struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL)
		return (NULL);
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
static __inline void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FREE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}
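
/*
 * Illustrative SCB life cycle (a sketch, not code from this driver):
 *
 *	scb = ahc_get_scb(ahc);
 *	if (scb == NULL)
 *		... defer the request until an SCB is freed ...
 *	... fill in scb->hscb and the S/G list ...
 *	ahc_queue_scb(ahc, scb);
 *	... on completion, ahc_free_scb(ahc, scb) returns it ...
 *
 * ahc_platform_scb_free() gives the OSM its hook for restarting
 * requests that were deferred for want of an SCB.
 */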

static __inline struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb* scb;

	scb = ahc->scb_data->scbindex[tag];
	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}

static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scbindex array.  This makes sure that we can
	 * still locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}
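
/*
 * Worked example (illustrative): suppose next_queued_scb's HSCB holds
 * tag 5 and the incoming scb's HSCB holds tag 9.  After the copy and
 * swap, the card-visible HSCB (tag 5) carries the incoming command
 * with its next field set to 9, tag 9's HSCB becomes the new
 * next_queued_scb, and scbindex[5] resolves back to the queued scb.
 */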

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);
	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}

static __inline struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

static __inline uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
	      + (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
static __inline void	ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
static __inline void	ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
static __inline u_int	ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
static __inline int	ahc_intr(void *arg);
static __inline void	ahc_minphys(struct buf *bp);

static __inline void
ahc_minphys(struct buf *bp)
{
	/*
	 * Even though the card can transfer up to 16megs per command
	 * we are limited by the number of segments in the DMA segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
	if (bp->b_bcount > AHC_MAXTRANSFER_SIZE) {
		bp->b_bcount = AHC_MAXTRANSFER_SIZE;
	}
	minphys(bp);
}

static __inline void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->parent_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static __inline void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc, ahc->parent_dmat /*shared_data_dmat*/,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, 0),
				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static __inline u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	ahc_dmamap_sync(ahc, ahc->parent_dmat /*shared_data_dmat*/,
			ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		ahc_dmamap_sync(ahc, ahc->parent_dmat /*shared_data_dmat*/,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
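
/*
 * The return value is a bitmask; a caller (a sketch) would service
 * each fifo that has work pending:
 *
 *	u_int run = ahc_check_cmdcmpltqueues(ahc);
 *
 *	if (run & AHC_RUN_QOUTFIFO)
 *		ahc_run_qoutfifo(ahc);
 *	if (run & AHC_RUN_TQINFIFO)
 *		ahc_run_tqinfifo(ahc, FALSE);
 *
 * ahc_intr() below uses the same check only as a cheap hint that a
 * command-complete interrupt is pending, avoiding a PCI read of
 * INTSTAT in the common case.
 */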

/*
 * Catch an interrupt from the adapter.
 */
static __inline int
ahc_intr(void *arg)
{
	struct ahc_softc *ahc = (struct ahc_softc*)arg;
	u_int	intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}
	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahc_inb(ahc, INTSTAT);

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);
		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		scsipi_channel_freeze(ahc->channel == 'A' ?
		    &ahc->sc_channel : &ahc->sc_channel_b, 1);
		ahc_run_qoutfifo(ahc);
		scsipi_channel_thaw(ahc->channel == 'A' ?
		    &ahc->sc_channel : &ahc->sc_channel_b, 1);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
		/* Hot eject */
		return (1);

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
		/* Fatal error, no more interrupts to handle. */
		return (1);
	}

	if ((intstat & (SEQINT|SCSIINT)) != 0)
		ahc_pause_bug_fix(ahc);

	if ((intstat & SEQINT) != 0)
		ahc_handle_seqint(ahc, intstat);

	if ((intstat & SCSIINT) != 0)
		ahc_handle_scsiint(ahc, intstat);

	return (1);
}
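
/*
 * Illustrative attachment (a sketch, assuming the NetBSD PCI
 * front-end; pc and ih would come from pci_intr_map() there):
 *
 *	ahc->ih = pci_intr_establish(pc, ih, IPL_BIO, ahc_intr, ahc);
 *
 * The int return value of ahc_intr() tells the interrupt dispatcher
 * whether the interrupt belonged to this controller, which matters
 * on shared interrupt lines.
 */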

#endif /* _AIC7XXX_INLINE_H_ */