1 | /* $NetBSD: aic79xx_inline.h,v 1.22 2013/04/27 13:25:09 kardel Exp $ */ |
2 | |
3 | /* |
4 | * Inline routines shareable across OS platforms. |
5 | * |
6 | * Copyright (c) 1994-2001 Justin T. Gibbs. |
7 | * Copyright (c) 2000-2003 Adaptec Inc. |
8 | * All rights reserved. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions, and the following disclaimer, |
15 | * without modification. |
16 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer |
17 | * substantially similar to the "NO WARRANTY" disclaimer below |
18 | * ("Disclaimer") and any redistribution must be conditioned upon |
19 | * including a substantially similar Disclaimer requirement for further |
20 | * binary redistribution. |
21 | * 3. Neither the names of the above-listed copyright holders nor the names |
22 | * of any contributors may be used to endorse or promote products derived |
23 | * from this software without specific prior written permission. |
24 | * |
25 | * Alternatively, this software may be distributed under the terms of the |
26 | * GNU General Public License ("GPL") version 2 as published by the Free |
27 | * Software Foundation. |
28 | * |
29 | * NO WARRANTY |
30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
31 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
32 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR |
33 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
34 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
35 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
36 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
37 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
38 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
39 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
40 | * POSSIBILITY OF SUCH DAMAGES. |
41 | * |
42 | * Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#51 $ |
43 | * |
44 | * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_inline.h,v 1.12 2003/06/28 04:43:19 gibbs Exp $ |
45 | */ |
46 | /* |
47 | * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003 |
48 | */ |
49 | |
50 | #ifndef _AIC79XX_INLINE_H_ |
51 | #define _AIC79XX_INLINE_H_ |
52 | |
53 | /******************************** Debugging ***********************************/ |
static __inline const char *ahd_name(struct ahd_softc *);

/* Return the controller's unit name for use in diagnostic messages. */
static __inline const char *
ahd_name(struct ahd_softc *ahd)
{
	return (ahd->name);
}
61 | |
62 | /************************ Sequencer Execution Control *************************/ |
63 | static __inline void ahd_known_modes(struct ahd_softc *, ahd_mode, ahd_mode); |
64 | static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *, |
65 | ahd_mode, ahd_mode); |
66 | static __inline void ahd_extract_mode_state(struct ahd_softc *, |
67 | ahd_mode_state, ahd_mode *, ahd_mode *); |
68 | static __inline void ahd_set_modes(struct ahd_softc *, ahd_mode, ahd_mode); |
69 | static __inline void ahd_update_modes(struct ahd_softc *); |
70 | static __inline void ahd_assert_modes(struct ahd_softc *, ahd_mode, |
71 | ahd_mode, const char *, int); |
72 | static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *); |
73 | static __inline void ahd_restore_modes(struct ahd_softc *, ahd_mode_state); |
74 | static __inline int ahd_is_paused(struct ahd_softc *); |
75 | static __inline void ahd_pause(struct ahd_softc *); |
76 | static __inline void ahd_unpause(struct ahd_softc *); |
77 | |
78 | static __inline void |
79 | ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) |
80 | { |
81 | ahd->src_mode = src; |
82 | ahd->dst_mode = dst; |
83 | ahd->saved_src_mode = src; |
84 | ahd->saved_dst_mode = dst; |
85 | } |
86 | |
87 | static __inline ahd_mode_state |
88 | ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) |
89 | { |
90 | return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT)); |
91 | } |
92 | |
93 | static __inline void |
94 | (struct ahd_softc *ahd, ahd_mode_state state, |
95 | ahd_mode *src, ahd_mode *dst) |
96 | { |
97 | *src = (state & SRC_MODE) >> SRC_MODE_SHIFT; |
98 | *dst = (state & DST_MODE) >> DST_MODE_SHIFT; |
99 | } |
100 | |
/*
 * Switch the chip's register-window modes by writing MODE_PTR and
 * record the new modes in the softc.  No-op when already current.
 */
static __inline void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n" );
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("%s: Setting mode 0x%x\n" , ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}
118 | |
/*
 * Resynchronize our cached modes with the hardware by reading MODE_PTR.
 * Used when the cached modes are AHD_MODE_UNKNOWN.
 */
static __inline void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printf("Reading mode 0x%x\n" , mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	/* Cache both the live and saved modes. */
	ahd_known_modes(ahd, src, dst);
}
134 | |
/*
 * Debug-only sanity check: panic unless the current src/dst modes fall
 * within the given mode masks.  Compiles to nothing without AHD_DEBUG.
 */
static __inline void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n" ,
		      ahd_name(ahd), file, line);
	}
#endif
}
147 | |
/*
 * Capture the current modes for a later ahd_restore_modes(), refreshing
 * the cache from the hardware first if it is stale.
 */
static __inline ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}
157 | |
158 | static __inline void |
159 | ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) |
160 | { |
161 | ahd_mode src; |
162 | ahd_mode dst; |
163 | |
164 | ahd_extract_mode_state(ahd, state, &src, &dst); |
165 | ahd_set_modes(ahd, src, dst); |
166 | } |
167 | |
/*
 * Assert (debug kernels only) that the current modes match expectation.
 * The expansion deliberately has no trailing semicolon: callers supply
 * their own, so the macro acts as a single statement and is safe in
 * unbraced if/else bodies (the old trailing ';' created an empty
 * statement that broke such constructs).
 */
#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__)
170 | |
171 | /* |
172 | * Determine whether the sequencer has halted code execution. |
173 | * Returns non-zero status if the sequencer is stopped. |
174 | */ |
175 | static __inline int |
176 | ahd_is_paused(struct ahd_softc *ahd) |
177 | { |
178 | return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); |
179 | } |
180 | |
181 | /* |
182 | * Request that the sequencer stop and wait, indefinitely, for it |
183 | * to stop. The sequencer will only acknowledge that it is paused |
184 | * once it has reached an instruction boundary and PAUSEDIS is |
185 | * cleared in the SEQCTL register. The sequencer may use PAUSEDIS |
186 | * for critical sections. |
187 | */ |
188 | static __inline void |
189 | ahd_pause(struct ahd_softc *ahd) |
190 | { |
191 | ahd_outb(ahd, HCNTRL, ahd->pause); |
192 | |
193 | /* |
194 | * Since the sequencer can disable pausing in a critical section, we |
195 | * must loop until it actually stops. |
196 | */ |
197 | while (ahd_is_paused(ahd) == 0) |
198 | ; |
199 | } |
200 | |
201 | /* |
202 | * Allow the sequencer to continue program execution. |
203 | * We check here to ensure that no additional interrupt |
204 | * sources that would cause the sequencer to halt have been |
205 | * asserted. If, for example, a SCSI bus reset is detected |
206 | * while we are fielding a different, pausing, interrupt type, |
207 | * we don't want to release the sequencer before going back |
208 | * into our interrupt handler and dealing with this new |
209 | * condition. |
210 | */ |
211 | static __inline void |
212 | ahd_unpause(struct ahd_softc *ahd) |
213 | { |
214 | /* |
215 | * Automatically restore our modes to those saved |
216 | * prior to the first change of the mode. |
217 | */ |
218 | if (ahd->saved_src_mode != AHD_MODE_UNKNOWN |
219 | && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { |
220 | if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) |
221 | ahd_reset_cmds_pending(ahd); |
222 | ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); |
223 | } |
224 | |
225 | if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) |
226 | ahd_outb(ahd, HCNTRL, ahd->unpause); |
227 | |
228 | ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); |
229 | } |
230 | |
231 | /*********************** Scatter Gather List Handling *************************/ |
232 | static __inline void *ahd_sg_setup(struct ahd_softc *, struct scb *, |
233 | void *, bus_addr_t, bus_size_t, int); |
234 | static __inline void ahd_setup_scb_common(struct ahd_softc *, struct scb *); |
235 | static __inline void ahd_setup_data_scb(struct ahd_softc *, struct scb *); |
236 | static __inline void ahd_setup_noxfer_scb(struct ahd_softc *, struct scb *); |
237 | |
/*
 * Fill in the next scatter/gather element for an SCB and return a
 * pointer to the slot that follows it.  'last' marks the final element
 * of the list.  Element layout depends on the controller's addressing
 * mode.
 */
static __inline void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, bus_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(bus_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		/*
		 * 39-bit addressing: address bits 32-38 ride in the
		 * high byte of the length field.
		 */
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}
261 | |
/*
 * Initialization shared by data and no-transfer SCBs: task attribute
 * selection and, where the CDB layout permits, the sense buffer address.
 */
static __inline void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
	} else {
		/* Non-packetized: flag odd transfer lengths for the core. */
		if (ahd_get_transfer_length(scb) & 0x01)
			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
		else
			scb->hscb->task_attribute = 0;
	}

	/* Short or indirect CDBs leave room for the sense address. */
	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}
282 | |
/*
 * Prime an SCB that performs a data transfer: seed the "current" data
 * pointer from the first SG element and record the SG list location.
 */
static __inline void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;
		uint32_t *dataptr_words;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
		dataptr_words[0] = sg->addr;
		dataptr_words[1] = 0;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			/* Address bits 32-38 are stashed in the SG length. */
			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}
319 | |
320 | static __inline void |
321 | ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) |
322 | { |
323 | scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); |
324 | scb->hscb->dataptr = 0; |
325 | scb->hscb->datacnt = 0; |
326 | } |
327 | |
328 | /************************** Memory mapping routines ***************************/ |
329 | static __inline size_t ahd_sg_size(struct ahd_softc *); |
330 | static __inline void * |
331 | ahd_sg_bus_to_virt(struct ahd_softc *, struct scb *, |
332 | uint32_t); |
333 | static __inline uint32_t |
334 | ahd_sg_virt_to_bus(struct ahd_softc *, struct scb *, |
335 | void *); |
336 | static __inline void ahd_sync_scb(struct ahd_softc *, struct scb *, int); |
337 | static __inline void ahd_sync_sglist(struct ahd_softc *, struct scb *, int); |
338 | static __inline void ahd_sync_sense(struct ahd_softc *, struct scb *, int); |
339 | static __inline uint32_t |
340 | ahd_targetcmd_offset(struct ahd_softc *, u_int); |
341 | |
342 | static __inline size_t |
343 | ahd_sg_size(struct ahd_softc *ahd) |
344 | { |
345 | if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) |
346 | return (sizeof(struct ahd_dma64_seg)); |
347 | return (sizeof(struct ahd_dma_seg)); |
348 | } |
349 | |
350 | static __inline void * |
351 | ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) |
352 | { |
353 | bus_addr_t sg_offset; |
354 | |
355 | /* sg_list_phys points to entry 1, not 0 */ |
356 | sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); |
357 | return ((uint8_t *)scb->sg_list + sg_offset); |
358 | } |
359 | |
360 | static __inline uint32_t |
361 | ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) |
362 | { |
363 | bus_addr_t sg_offset; |
364 | |
365 | /* sg_list_phys points to entry 1, not 0 */ |
366 | sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) |
367 | - ahd_sg_size(ahd); |
368 | |
369 | return (scb->sg_list_busaddr + sg_offset); |
370 | } |
371 | |
/*
 * Perform a DMA sync of an SCB's hardware SCB at its offset within the
 * hscb map, so host and adapter views stay coherent.
 */
static __inline void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}
379 | |
/*
 * DMA sync of an SCB's SG list.  The busaddr points at entry 1, so the
 * sync offset is biased back by one element.  No-op for SCBs with no
 * SG entries.
 */
static __inline void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahd_dmamap_sync(ahd, ahd->parent_dmat, scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}
390 | |
/*
 * DMA sync of an SCB's sense buffer within the shared sense map.
 */
static __inline void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr - scb->sense_map->physaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}
399 | |
400 | static __inline uint32_t |
401 | ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) |
402 | { |
403 | return (((uint8_t *)&ahd->targetcmds[index]) |
404 | - (uint8_t *)ahd->qoutfifo); |
405 | } |
406 | |
407 | /*********************** Miscellaneous Support Functions ***********************/ |
408 | static __inline void ahd_complete_scb(struct ahd_softc *, struct scb *); |
409 | static __inline void ahd_update_residual(struct ahd_softc *, struct scb *); |
410 | static __inline struct ahd_initiator_tinfo * |
411 | ahd_fetch_transinfo(struct ahd_softc *, char, u_int, |
412 | u_int, struct ahd_tmode_tstate **); |
413 | static __inline uint16_t |
414 | ahd_inw(struct ahd_softc *, u_int); |
415 | static __inline void ahd_outw(struct ahd_softc *, u_int, u_int); |
416 | static __inline uint32_t |
417 | ahd_inl(struct ahd_softc *, u_int); |
418 | static __inline void ahd_outl(struct ahd_softc *, u_int, uint32_t); |
419 | static __inline uint64_t |
420 | ahd_inq(struct ahd_softc *, u_int); |
421 | static __inline void ahd_outq(struct ahd_softc *, u_int, uint64_t); |
422 | static __inline u_int ahd_get_scbptr(struct ahd_softc *); |
423 | static __inline void ahd_set_scbptr(struct ahd_softc *, u_int); |
424 | static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *); |
425 | static __inline void ahd_set_hnscb_qoff(struct ahd_softc *, u_int); |
426 | static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *); |
427 | static __inline void ahd_set_hescb_qoff(struct ahd_softc *, u_int); |
428 | static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *); |
429 | static __inline void ahd_set_snscb_qoff(struct ahd_softc *, u_int); |
430 | static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *); |
431 | static __inline void ahd_set_sescb_qoff(struct ahd_softc *, u_int); |
432 | static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *); |
433 | static __inline void ahd_set_sdscb_qoff(struct ahd_softc *, u_int); |
434 | static __inline u_int ahd_inb_scbram(struct ahd_softc *, u_int); |
435 | static __inline u_int ahd_inw_scbram(struct ahd_softc *, u_int); |
436 | static __inline uint32_t |
437 | ahd_inl_scbram(struct ahd_softc *, u_int); |
438 | static __inline uint64_t |
439 | ahd_inq_scbram(struct ahd_softc *ahd, u_int offset); |
440 | static __inline void ahd_swap_with_next_hscb(struct ahd_softc *, |
441 | struct scb *); |
442 | static __inline void ahd_queue_scb(struct ahd_softc *, struct scb *); |
443 | static __inline uint8_t * |
444 | ahd_get_sense_buf(struct ahd_softc *, struct scb *); |
445 | static __inline uint32_t |
446 | ahd_get_sense_bufaddr(struct ahd_softc *, struct scb *); |
447 | static __inline void ahd_post_scb(struct ahd_softc *, struct scb *); |
448 | |
449 | |
450 | static __inline void |
451 | ahd_post_scb(struct ahd_softc *ahd, struct scb *scb) |
452 | { |
453 | uint32_t sgptr; |
454 | |
455 | sgptr = ahd_le32toh(scb->hscb->sgptr); |
456 | if ((sgptr & SG_STATUS_VALID) != 0) |
457 | ahd_handle_scb_status(ahd, scb); |
458 | else |
459 | ahd_done(ahd, scb); |
460 | } |
461 | |
462 | static __inline void |
463 | ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) |
464 | { |
465 | uint32_t sgptr; |
466 | |
467 | sgptr = ahd_le32toh(scb->hscb->sgptr); |
468 | if ((sgptr & SG_STATUS_VALID) != 0) |
469 | ahd_handle_scb_status(ahd, scb); |
470 | else |
471 | ahd_done(ahd, scb); |
472 | } |
473 | |
474 | /* |
475 | * Determine whether the sequencer reported a residual |
476 | * for this SCB/transaction. |
477 | */ |
478 | static __inline void |
479 | ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) |
480 | { |
481 | uint32_t sgptr; |
482 | |
483 | sgptr = ahd_le32toh(scb->hscb->sgptr); |
484 | if ((sgptr & SG_STATUS_VALID) != 0) |
485 | ahd_calc_residual(ahd, scb); |
486 | } |
487 | |
488 | /* |
489 | * Return pointers to the transfer negotiation information |
490 | * for the specified our_id/remote_id pair. |
491 | */ |
492 | static __inline struct ahd_initiator_tinfo * |
493 | ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, |
494 | u_int remote_id, struct ahd_tmode_tstate **tstate) |
495 | { |
496 | /* |
497 | * Transfer data structures are stored from the perspective |
498 | * of the target role. Since the parameters for a connection |
499 | * in the initiator role to a given target are the same as |
500 | * when the roles are reversed, we pretend we are the target. |
501 | */ |
502 | if (channel == 'B') |
503 | our_id += 8; |
504 | *tstate = ahd->enabled_targets[our_id]; |
505 | return (&(*tstate)->transinfo[remote_id]); |
506 | } |
507 | |
/*
 * Copy the collision-index fields (scsiid and lun) from one SCB to
 * another.  Arguments are parenthesized so expression arguments
 * (e.g. pointer arithmetic) expand safely.
 */
#define AHD_COPY_COL_IDX(dst, src)				\
do {								\
	(dst)->hscb->scsiid = (src)->hscb->scsiid;		\
	(dst)->hscb->lun = (src)->hscb->lun;			\
} while (0)
513 | |
/*
 * Read a 16-bit register as two byte reads.
 * NOTE(review): the two ahd_inb() calls are in one expression, so C
 * does not fix their evaluation order — assumed harmless for these
 * registers; confirm if a read order is required.
 */
static __inline uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
}
519 | |
/*
 * Write a 16-bit register as two byte writes, low byte first.
 */
static __inline void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}
526 | |
/*
 * Read a 32-bit register as four byte reads, composed little-endian.
 */
static __inline uint32_t
ahd_inl(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24));
}
535 | |
/*
 * Write a 32-bit register as four byte writes, lowest byte first.
 */
static __inline void
ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
{
	ahd_outb(ahd, port, (value) & 0xFF);
	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
}
544 | |
/*
 * Read a 64-bit register as eight byte reads, composed little-endian.
 */
static __inline uint64_t
ahd_inq(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24)
	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
}
557 | |
/*
 * Write a 64-bit register as eight byte writes, lowest byte first.
 */
static __inline void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}
570 | |
/*
 * Read the 16-bit SCB pointer.  SCBPTR is not valid in UNKNOWN or CFG
 * mode, hence the mode assertion.
 */
static __inline u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}
578 | |
/*
 * Write the 16-bit SCB pointer.  Same mode restriction as
 * ahd_get_scbptr().
 */
static __inline void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}
587 | |
/* Read the host new-SCB queue offset (atomic 16-bit access). */
static __inline u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}
593 | |
/* Write the host new-SCB queue offset (atomic 16-bit access). */
static __inline void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}
599 | |
/* Read the host empty-SCB queue offset. */
static __inline u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}
605 | |
/* Write the host empty-SCB queue offset. */
static __inline void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}
611 | |
/*
 * Read the sequencer new-SCB queue offset (CCHAN mode required).
 * NOTE(review): the value is written straight back after the read —
 * presumably required to re-latch the hardware offset; preserved as-is.
 */
static __inline u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}
622 | |
/* Write the sequencer new-SCB queue offset (CCHAN mode required). */
static __inline void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}
629 | |
/* Read the sequencer empty-SCB queue offset (CCHAN mode required). */
static __inline u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}
636 | |
/* Write the sequencer empty-SCB queue offset (CCHAN mode required). */
static __inline void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}
643 | |
/* Read the 16-bit sequencer done-SCB queue offset (CCHAN mode). */
static __inline u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}
650 | |
/* Write the 16-bit sequencer done-SCB queue offset (CCHAN mode). */
static __inline void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}
658 | |
/*
 * Read one byte of SCB RAM, applying the PCI-X Rev A prefetch
 * workaround when the chip requires it.
 */
static __inline u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The work around is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}
683 | |
/* Read 16 bits of SCB RAM via two workaround-aware byte reads. */
static __inline u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}
690 | |
/* Read 32 bits of SCB RAM via two workaround-aware word reads. */
static __inline uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inw_scbram(ahd, offset)
	      | (ahd_inw_scbram(ahd, offset+2) << 16));
}
697 | |
/* Read 64 bits of SCB RAM via two workaround-aware longword reads. */
static __inline uint64_t
ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inl_scbram(ahd, offset)
	      | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
}
704 | |
/*
 * Map an SCB tag to its in-core SCB, or NULL if the tag is out of range
 * or unassigned.  The SCB's DMA memory is post-synced before return so
 * the host sees the adapter's latest writes.
 */
static __inline struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}
718 | |
/*
 * Swap an SCB's hardware SCB with the pre-announced "next queued" HSCB
 * so the sequencer downloads the HSCB it already expects.
 */
static __inline void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	struct map_node *q_hscb_map;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	q_hscb_map = ahd->next_queued_hscb_map;
	/* The copy must not clobber the destination's own bus address. */
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	ahd->next_queued_hscb_map = scb->hscb_map;
	scb->hscb = q_hscb;
	scb->hscb_map = q_hscb_map;

	/* Sanity: the new HSCB must lie within its map's page. */
	KASSERT((vaddr_t)scb->hscb >= (vaddr_t)scb->hscb_map->vaddr &&
		(vaddr_t)scb->hscb < (vaddr_t)scb->hscb_map->vaddr + PAGE_SIZE);

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}
757 | |
758 | /* |
759 | * Tell the sequencer about a new transaction to execute. |
760 | */ |
761 | static __inline void |
762 | ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) |
763 | { |
764 | ahd_swap_with_next_hscb(ahd, scb); |
765 | |
766 | if (SCBID_IS_NULL(SCB_GET_TAG(scb))) |
767 | panic("Attempt to queue invalid SCB tag %x\n" , |
768 | SCB_GET_TAG(scb)); |
769 | |
770 | /* |
771 | * Keep a history of SCBs we've downloaded in the qinfifo. |
772 | */ |
773 | ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); |
774 | ahd->qinfifonext++; |
775 | |
776 | if (scb->sg_count != 0) |
777 | ahd_setup_data_scb(ahd, scb); |
778 | else |
779 | ahd_setup_noxfer_scb(ahd, scb); |
780 | ahd_setup_scb_common(ahd, scb); |
781 | |
782 | /* |
783 | * Make sure our data is consistent from the |
784 | * perspective of the adapter. |
785 | */ |
786 | ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
787 | |
788 | #ifdef AHD_DEBUG |
789 | if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { |
790 | uint64_t host_dataptr; |
791 | |
792 | host_dataptr = ahd_le64toh(scb->hscb->dataptr); |
793 | printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n" , |
794 | ahd_name(ahd), |
795 | SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr), |
796 | (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), |
797 | (u_int)(host_dataptr & 0xFFFFFFFF), |
798 | ahd_le32toh(scb->hscb->datacnt)); |
799 | } |
800 | #endif |
801 | /* Tell the adapter about the newly queued SCB */ |
802 | ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); |
803 | } |
804 | |
/* Return the kernel-virtual address of the SCB's sense buffer. */
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_data);
}
810 | |
/* Return the bus address of the SCB's sense buffer. */
static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
{
	return (scb->sense_busaddr);
}
816 | |
817 | /************************** Interrupt Processing ******************************/ |
818 | static __inline void ahd_sync_qoutfifo(struct ahd_softc *, int); |
819 | static __inline void ahd_sync_tqinfifo(struct ahd_softc *, int); |
820 | static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *); |
821 | static __inline int ahd_intr(void *); |
822 | static __inline void ahd_minphys(struct buf *); |
823 | |
/* DMA sync of the entire in-core qoutfifo (one 16-bit entry per SCB). */
static __inline void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->parent_dmat, ahd->shared_data_map.dmamap,
			/*offset*/0, /*len*/AHD_SCB_MAX * sizeof(uint16_t), op);
}
830 | |
/*
 * DMA sync of the target-mode command fifo.  No-op unless this build
 * has target mode and the controller is acting in the target role.
 */
static __inline void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}
844 | |
845 | /* |
846 | * See if the firmware has posted any completed commands |
847 | * into our in-core command complete fifos. |
848 | */ |
849 | #define AHD_RUN_QOUTFIFO 0x1 |
850 | #define AHD_RUN_TQINFIFO 0x2 |
851 | static __inline u_int |
852 | ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) |
853 | { |
854 | u_int retval; |
855 | |
856 | retval = 0; |
857 | ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/, ahd->shared_data_map.dmamap, |
858 | /*offset*/ahd->qoutfifonext, /*len*/2, |
859 | BUS_DMASYNC_POSTREAD); |
860 | if ((ahd->qoutfifo[ahd->qoutfifonext] |
861 | & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag) |
862 | retval |= AHD_RUN_QOUTFIFO; |
863 | #ifdef AHD_TARGET_MODE |
864 | if ((ahd->flags & AHD_TARGETROLE) != 0 |
865 | && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { |
866 | ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/, |
867 | ahd->shared_data_map.dmamap, |
868 | ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), |
869 | /*len*/sizeof(struct target_cmd), |
870 | BUS_DMASYNC_POSTREAD); |
871 | if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) |
872 | retval |= AHD_RUN_TQINFIFO; |
873 | } |
874 | #endif |
875 | return (retval); |
876 | } |
877 | |
878 | /* |
879 | * Catch an interrupt from the adapter |
880 | */ |
881 | static __inline int |
882 | ahd_intr(void *arg) |
883 | { |
884 | struct ahd_softc *ahd = arg; |
885 | u_int intstat; |
886 | |
887 | if ((ahd->pause & INTEN) == 0) { |
888 | /* |
889 | * Our interrupt is not enabled on the chip |
890 | * and may be disabled for re-entrancy reasons, |
891 | * so just return. This is likely just a shared |
892 | * interrupt. |
893 | */ |
894 | return (0); |
895 | } |
896 | |
897 | /* |
898 | * Instead of directly reading the interrupt status register, |
899 | * infer the cause of the interrupt by checking our in-core |
900 | * completion queues. This avoids a costly PCI bus read in |
901 | * most cases. |
902 | */ |
903 | if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 |
904 | && (ahd_check_cmdcmpltqueues(ahd) != 0)) |
905 | intstat = CMDCMPLT; |
906 | else |
907 | intstat = ahd_inb(ahd, INTSTAT); |
908 | |
909 | if ((intstat & INT_PEND) == 0) |
910 | return (0); |
911 | |
912 | if (intstat & CMDCMPLT) { |
913 | ahd_outb(ahd, CLRINT, CLRCMDINT); |
914 | |
915 | /* |
916 | * Ensure that the chip sees that we've cleared |
917 | * this interrupt before we walk the output fifo. |
918 | * Otherwise, we may, due to posted bus writes, |
919 | * clear the interrupt after we finish the scan, |
920 | * and after the sequencer has added new entries |
921 | * and asserted the interrupt again. |
922 | */ |
923 | if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { |
924 | if (ahd_is_paused(ahd)) { |
925 | /* |
926 | * Potentially lost SEQINT. |
927 | * If SEQINTCODE is non-zero, |
928 | * simulate the SEQINT. |
929 | */ |
930 | if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT) |
931 | intstat |= SEQINT; |
932 | } |
933 | } else { |
934 | ahd_flush_device_writes(ahd); |
935 | } |
936 | scsipi_channel_freeze(&ahd->sc_channel, 1); |
937 | ahd_run_qoutfifo(ahd); |
938 | scsipi_channel_thaw(&ahd->sc_channel, 1); |
939 | ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++; |
940 | ahd->cmdcmplt_total++; |
941 | #ifdef AHD_TARGET_MODE |
942 | if ((ahd->flags & AHD_TARGETROLE) != 0) |
943 | ahd_run_tqinfifo(ahd, /*paused*/FALSE); |
944 | #endif |
945 | if (intstat == CMDCMPLT) |
946 | return 1; |
947 | } |
948 | |
949 | /* |
950 | * Handle statuses that may invalidate our cached |
951 | * copy of INTSTAT separately. |
952 | */ |
953 | if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { |
954 | /* Hot eject. Do nothing */ |
955 | } else if (intstat & HWERRINT) { |
956 | ahd_handle_hwerrint(ahd); |
957 | } else if ((intstat & (PCIINT|SPLTINT)) != 0) { |
958 | ahd->bus_intr(ahd); |
959 | } else { |
960 | |
961 | if ((intstat & SEQINT) != 0) |
962 | ahd_handle_seqint(ahd, intstat); |
963 | |
964 | if ((intstat & SCSIINT) != 0) |
965 | ahd_handle_scsiint(ahd, intstat); |
966 | } |
967 | |
968 | return (1); |
969 | } |
970 | |
971 | static __inline void |
972 | ahd_minphys(struct buf *bp) |
973 | { |
974 | /* |
975 | * Even though the card can transfer up to 16megs per command |
976 | * we are limited by the number of segments in the DMA segment |
977 | * list that we can hold. The worst case is that all pages are |
978 | * discontinuous physically, hence the "page per segment" limit |
979 | * enforced here. |
980 | */ |
981 | if (bp->b_bcount > AHD_MAXTRANSFER_SIZE) { |
982 | bp->b_bcount = AHD_MAXTRANSFER_SIZE; |
983 | } |
984 | minphys(bp); |
985 | } |
986 | |
static __inline u_int32_t scsi_4btoul(u_int8_t *);

/*
 * Assemble a 32-bit value from 4 big-endian SCSI bytes.
 *
 * Each byte is widened to u_int32_t before shifting: a plain
 * "bytes[0] << 24" promotes the byte to (signed) int, and left-
 * shifting a value >= 0x80 into the sign bit is undefined behavior
 * in C (CERT INT34-C).
 */
static __inline u_int32_t
scsi_4btoul(u_int8_t *bytes)
{
	u_int32_t rv;

	rv = ((u_int32_t)bytes[0] << 24) |
	     ((u_int32_t)bytes[1] << 16) |
	     ((u_int32_t)bytes[2] << 8) |
	     (u_int32_t)bytes[3];
	return (rv);
}
1000 | |
1001 | |
1002 | #endif /* _AIC79XX_INLINE_H_ */ |
1003 | |