1 | /* $NetBSD: siop_common.c,v 1.54 2013/09/15 13:56:27 martin Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2000, 2002 Manuel Bouyer. |
5 | * |
6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions |
8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | * |
26 | */ |
27 | |
28 | /* SYM53c7/8xx PCI-SCSI I/O Processors driver */ |
29 | |
30 | #include <sys/cdefs.h> |
31 | __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.54 2013/09/15 13:56:27 martin Exp $"); |
32 | |
33 | #include <sys/param.h> |
34 | #include <sys/systm.h> |
35 | #include <sys/device.h> |
36 | #include <sys/malloc.h> |
37 | #include <sys/buf.h> |
38 | #include <sys/kernel.h> |
39 | #include <sys/scsiio.h> |
40 | |
41 | #include <machine/endian.h> |
42 | #include <sys/bus.h> |
43 | |
44 | #include <dev/scsipi/scsi_all.h> |
45 | #include <dev/scsipi/scsi_message.h> |
46 | #include <dev/scsipi/scsipi_all.h> |
47 | |
48 | #include <dev/scsipi/scsiconf.h> |
49 | |
50 | #include <dev/ic/siopreg.h> |
51 | #include <dev/ic/siopvar_common.h> |
52 | |
53 | #include "opt_siop.h" |
54 | |
55 | #undef DEBUG |
56 | #undef DEBUG_DR |
57 | #undef DEBUG_NEG |
58 | |
59 | int |
60 | siop_common_attach(struct siop_common_softc *sc) |
61 | { |
62 | int error, i; |
63 | bus_dma_segment_t seg; |
64 | int rseg; |
65 | |
66 | /* |
67 | * Allocate DMA-safe memory for the script and map it. |
68 | */ |
69 | if ((sc->features & SF_CHIP_RAM) == 0) { |
70 | error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, |
71 | PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); |
72 | if (error) { |
73 | aprint_error_dev(sc->sc_dev, |
74 | "unable to allocate script DMA memory, " |
75 | "unable to allocate script DMA memory, " "error = %d\n", error); |
76 | return error; |
77 | } |
78 | error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, |
79 | (void **)&sc->sc_script, |
80 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT); |
81 | if (error) { |
82 | aprint_error_dev(sc->sc_dev, |
83 | "unable to map script DMA memory, " |
84 | "error = %d\n", error); |
85 | return error; |
86 | } |
87 | error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, |
88 | PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma); |
89 | if (error) { |
90 | aprint_error_dev(sc->sc_dev, |
91 | "unable to create script DMA map, " |
92 | "error = %d\n", error); |
93 | return error; |
94 | } |
95 | error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma, |
96 | sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT); |
97 | if (error) { |
98 | aprint_error_dev(sc->sc_dev, |
99 | "unable to load script DMA map, " |
100 | "error = %d\n", error); |
101 | return error; |
102 | } |
103 | sc->sc_scriptaddr = |
104 | sc->sc_scriptdma->dm_segs[0].ds_addr; |
105 | sc->ram_size = PAGE_SIZE; |
106 | } |
107 | |
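| /* glue into the machine-independent scsipi layer: adapter and channel setup */ |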
108 | sc->sc_adapt.adapt_dev = sc->sc_dev; |
109 | sc->sc_adapt.adapt_nchannels = 1; |
110 | sc->sc_adapt.adapt_openings = 0; |
111 | sc->sc_adapt.adapt_ioctl = siop_ioctl; |
112 | sc->sc_adapt.adapt_minphys = minphys; |
113 | |
114 | memset(&sc->sc_chan, 0, sizeof(sc->sc_chan)); |
115 | sc->sc_chan.chan_adapter = &sc->sc_adapt; |
116 | sc->sc_chan.chan_bustype = &scsi_bustype; |
117 | sc->sc_chan.chan_channel = 0; |
118 | sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW; |
119 | sc->sc_chan.chan_ntargets = |
120 | (sc->features & SF_BUS_WIDE) ? 16 : 8; |
121 | sc->sc_chan.chan_nluns = 8; |
122 | sc->sc_chan.chan_id = |
123 | bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID); |
124 | if (sc->sc_chan.chan_id == 0 || |
125 | sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets) |
126 | sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET; |
127 | |
128 | for (i = 0; i < 16; i++) |
129 | sc->targets[i] = NULL; |
130 | |
131 | /* find min/max sync period for this chip */ |
132 | sc->st_maxsync = 0; |
133 | sc->dt_maxsync = 0; |
134 | sc->st_minsync = 255; |
135 | sc->dt_minsync = 255; |
136 | for (i = 0; i < __arraycount(scf_period); i++) { |
137 | if (sc->clock_period != scf_period[i].clock) |
138 | continue; |
139 | if (sc->st_maxsync < scf_period[i].period) |
140 | sc->st_maxsync = scf_period[i].period; |
141 | if (sc->st_minsync > scf_period[i].period) |
142 | sc->st_minsync = scf_period[i].period; |
143 | } |
144 | if (sc->st_maxsync == 255 || sc->st_minsync == 0) |
145 | panic("siop: can't find my sync parameters"); |
146 | for (i = 0; i < __arraycount(dt_scf_period); i++) { |
147 | if (sc->clock_period != dt_scf_period[i].clock) |
148 | continue; |
149 | if (sc->dt_maxsync < dt_scf_period[i].period) |
150 | sc->dt_maxsync = dt_scf_period[i].period; |
151 | if (sc->dt_minsync > dt_scf_period[i].period) |
152 | sc->dt_minsync = dt_scf_period[i].period; |
153 | } |
154 | if (sc->dt_maxsync == 255 || sc->dt_minsync == 0) |
155 | panic("siop: can't find my sync parameters"); |
156 | return 0; |
157 | } |
158 | |
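| /* reset the chip and reprogram the operating registers to their default values */ |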
159 | void |
160 | siop_common_reset(struct siop_common_softc *sc) |
161 | { |
162 | u_int32_t stest1, stest3; |
163 | |
164 | /* reset the chip */ |
165 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST); |
166 | delay(1000); |
167 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0); |
168 | |
169 | /* init registers */ |
170 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0, |
171 | SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP); |
172 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0); |
173 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div); |
174 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0); |
175 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff); |
176 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0, |
177 | 0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL)); |
178 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1, |
179 | 0xff & ~(SIEN1_HTH | SIEN1_GEN)); |
180 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0); |
181 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE); |
182 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0, |
183 | (0xb << STIME0_SEL_SHIFT)); |
184 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID, |
185 | sc->sc_chan.chan_id | SCID_RRE); |
186 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0, |
187 | 1 << sc->sc_chan.chan_id); |
188 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL, |
189 | (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM); |
190 | if (sc->features & SF_CHIP_AAIP) |
191 | bus_space_write_1(sc->sc_rt, sc->sc_rh, |
192 | SIOP_AIPCNTL1, AIPCNTL1_DIS); |
193 | |
194 | /* enable clock doubler or quadrupler if appropriate */ |
195 | if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) { |
196 | stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3); |
197 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, |
198 | STEST1_DBLEN); |
199 | if (sc->features & SF_CHIP_QUAD) { |
200 | /* wait for PLL to lock */ |
201 | while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, |
202 | SIOP_STEST4) & STEST4_LOCK) == 0) |
203 | delay(10); |
204 | } else { |
205 | /* data sheet says 20us - more won't hurt */ |
206 | delay(100); |
207 | } |
208 | /* halt scsi clock, select doubler/quad, restart clock */ |
209 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, |
210 | stest3 | STEST3_HSC); |
211 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, |
212 | STEST1_DBLEN | STEST1_DBLSEL); |
213 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3); |
214 | } else { |
215 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0); |
216 | } |
217 | |
218 | if (sc->features & SF_CHIP_USEPCIC) { |
219 | stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1); |
220 | stest1 |= STEST1_SCLK; |
221 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1); |
222 | } |
223 | |
224 | if (sc->features & SF_CHIP_FIFO) |
225 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5, |
226 | bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) | |
227 | CTEST5_DFS); |
228 | if (sc->features & SF_CHIP_LED0) { |
229 | /* Set GPIO0 as output if software LED control is required */ |
230 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL, |
231 | bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe); |
232 | } |
233 | if (sc->features & SF_BUS_ULTRA3) { |
234 | /* reset SCNTL4 */ |
235 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0); |
236 | } |
237 | sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) & |
238 | STEST4_MODE_MASK; |
239 | |
240 | /* |
241 | * initialise the RAM. Without this we may get scsi gross errors on |
242 | * the 1010 |
243 | */ |
244 | if (sc->features & SF_CHIP_RAM) |
245 | bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh, |
246 | 0, 0, sc->ram_size / 4); |
247 | sc->sc_reset(sc); |
248 | } |
249 | |
250 | /* prepare tables before sending a cmd */ |
251 | void |
252 | siop_setuptables(struct siop_common_cmd *siop_cmd) |
253 | { |
254 | int i; |
255 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
256 | struct scsipi_xfer *xs = siop_cmd->xs; |
257 | int target = xs->xs_periph->periph_target; |
258 | int lun = xs->xs_periph->periph_lun; |
259 | int msgoffset = 1; |
260 | |
261 | siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id); |
262 | memset(siop_cmd->siop_tables->msg_out, 0, |
263 | sizeof(siop_cmd->siop_tables->msg_out)); |
264 | /* request sense doesn't disconnect */ |
265 | if (xs->xs_control & XS_CTL_REQSENSE) |
266 | siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0); |
267 | else if ((sc->features & SF_CHIP_GEBUG) && |
268 | (sc->targets[target]->flags & TARF_ISWIDE) == 0) |
269 | /* |
270 | * 1010 bug: it seems that the 1010 has problems with reselect |
271 | * when not in wide mode (it generates false SCSI gross errors). |
272 | * The FreeBSD sym driver has comments about it but their |
273 | * workaround (disable SCSI gross error reporting) doesn't |
274 | * work with my adapter. So disable disconnect when not |
275 | * wide. |
276 | */ |
277 | siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0); |
278 | else |
279 | siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1); |
280 | if (xs->xs_tag_type != 0) { |
281 | if ((sc->targets[target]->flags & TARF_TAG) == 0) { |
282 | scsipi_printaddr(xs->xs_periph); |
283 | printf(": tagged command type %d id %d\n", |
284 | siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id); |
285 | panic("tagged command for non-tagging device"); |
286 | } |
287 | siop_cmd->flags |= CMDFL_TAG; |
288 | siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type; |
289 | /* |
290 | * use siop_cmd->tag not xs->xs_tag_id, caller may want a |
291 | * different one |
292 | */ |
293 | siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag; |
294 | msgoffset = 3; |
295 | } |
296 | siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset); |
297 | if (sc->targets[target]->status == TARST_ASYNC) { |
298 | if ((sc->targets[target]->flags & TARF_DT) && |
299 | (sc->mode == STEST4_MODE_LVD)) { |
300 | sc->targets[target]->status = TARST_PPR_NEG; |
301 | siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync, |
302 | sc->maxoff); |
303 | } else if (sc->targets[target]->flags & TARF_WIDE) { |
304 | sc->targets[target]->status = TARST_WIDE_NEG; |
305 | siop_wdtr_msg(siop_cmd, msgoffset, |
306 | MSG_EXT_WDTR_BUS_16_BIT); |
307 | } else if (sc->targets[target]->flags & TARF_SYNC) { |
308 | sc->targets[target]->status = TARST_SYNC_NEG; |
309 | siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync, |
310 | (sc->maxoff > 31) ? 31 : sc->maxoff); |
311 | } else { |
312 | sc->targets[target]->status = TARST_OK; |
313 | siop_update_xfer_mode(sc, target); |
314 | } |
315 | } |
316 | siop_cmd->siop_tables->status = |
317 | siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */ |
318 | |
319 | siop_cmd->siop_tables->cmd.count = |
320 | siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len); |
321 | siop_cmd->siop_tables->cmd.addr = |
322 | siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr); |
323 | if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) { |
324 | for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) { |
325 | siop_cmd->siop_tables->data[i].count = |
326 | siop_htoc32(sc, |
327 | siop_cmd->dmamap_data->dm_segs[i].ds_len); |
328 | siop_cmd->siop_tables->data[i].addr = |
329 | siop_htoc32(sc, |
330 | siop_cmd->dmamap_data->dm_segs[i].ds_addr); |
331 | } |
332 | } |
333 | } |
334 | |
335 | int |
336 | siop_wdtr_neg(struct siop_common_cmd *siop_cmd) |
337 | { |
338 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
339 | struct siop_common_target *siop_target = siop_cmd->siop_target; |
340 | int target = siop_cmd->xs->xs_periph->periph_target; |
341 | struct siop_common_xfer *tables = siop_cmd->siop_tables; |
342 | |
343 | if (siop_target->status == TARST_WIDE_NEG) { |
344 | /* we initiated wide negotiation */ |
345 | switch (tables->msg_in[3]) { |
346 | case MSG_EXT_WDTR_BUS_8_BIT: |
347 | siop_target->flags &= ~TARF_ISWIDE; |
348 | sc->targets[target]->id &= ~(SCNTL3_EWS << 24); |
349 | break; |
350 | case MSG_EXT_WDTR_BUS_16_BIT: |
351 | if (siop_target->flags & TARF_WIDE) { |
352 | siop_target->flags |= TARF_ISWIDE; |
353 | sc->targets[target]->id |= (SCNTL3_EWS << 24); |
354 | break; |
355 | } |
356 | /* FALLTHROUGH */ |
357 | default: |
358 | /* |
359 | * hum, we got more than what we can handle, shouldn't |
360 | * happen. Reject, and stay async |
361 | */ |
362 | siop_target->flags &= ~TARF_ISWIDE; |
363 | siop_target->status = TARST_OK; |
364 | siop_target->offset = siop_target->period = 0; |
365 | siop_update_xfer_mode(sc, target); |
366 | printf("%s: rejecting invalid wide negotiation from " |
367 | "target %d (%d)\n", device_xname(sc->sc_dev), |
368 | target, |
369 | tables->msg_in[3]); |
370 | tables->t_msgout.count = siop_htoc32(sc, 1); |
371 | tables->msg_out[0] = MSG_MESSAGE_REJECT; |
372 | return SIOP_NEG_MSGOUT; |
373 | } |
374 | tables->id = siop_htoc32(sc, sc->targets[target]->id); |
375 | bus_space_write_1(sc->sc_rt, sc->sc_rh, |
376 | SIOP_SCNTL3, |
377 | (sc->targets[target]->id >> 24) & 0xff); |
378 | /* we now need to do sync */ |
379 | if (siop_target->flags & TARF_SYNC) { |
380 | siop_target->status = TARST_SYNC_NEG; |
381 | siop_sdtr_msg(siop_cmd, 0, sc->st_minsync, |
382 | (sc->maxoff > 31) ? 31 : sc->maxoff); |
383 | return SIOP_NEG_MSGOUT; |
384 | } else { |
385 | siop_target->status = TARST_OK; |
386 | siop_update_xfer_mode(sc, target); |
387 | return SIOP_NEG_ACK; |
388 | } |
389 | } else { |
390 | /* target initiated wide negotiation */ |
391 | if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT |
392 | && (siop_target->flags & TARF_WIDE)) { |
393 | siop_target->flags |= TARF_ISWIDE; |
394 | sc->targets[target]->id |= SCNTL3_EWS << 24; |
395 | } else { |
396 | siop_target->flags &= ~TARF_ISWIDE; |
397 | sc->targets[target]->id &= ~(SCNTL3_EWS << 24); |
398 | } |
399 | tables->id = siop_htoc32(sc, sc->targets[target]->id); |
400 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, |
401 | (sc->targets[target]->id >> 24) & 0xff); |
402 | /* |
403 | * we did reset wide parameters, so fall back to async, |
404 | * but don't schedule a sync neg, target should initiate it |
405 | */ |
406 | siop_target->status = TARST_OK; |
407 | siop_target->offset = siop_target->period = 0; |
408 | siop_update_xfer_mode(sc, target); |
409 | siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ? |
410 | MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT); |
411 | return SIOP_NEG_MSGOUT; |
412 | } |
413 | } |
414 | |
415 | int |
416 | siop_ppr_neg(struct siop_common_cmd *siop_cmd) |
417 | { |
418 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
419 | struct siop_common_target *siop_target = siop_cmd->siop_target; |
420 | int target = siop_cmd->xs->xs_periph->periph_target; |
421 | struct siop_common_xfer *tables = siop_cmd->siop_tables; |
422 | int sync, offset, options, scf = 0; |
423 | int i; |
424 | |
425 | #ifdef DEBUG_NEG |
426 | printf("%s: answer on ppr negotiation:", device_xname(sc->sc_dev)); |
427 | for (i = 0; i < 8; i++) |
428 | printf(" 0x%x", tables->msg_in[i]); |
429 | printf("\n"); |
430 | #endif |
431 | |
432 | if (siop_target->status == TARST_PPR_NEG) { |
433 | /* we initiated PPR negotiation */ |
434 | sync = tables->msg_in[3]; |
435 | offset = tables->msg_in[5]; |
436 | options = tables->msg_in[7]; |
437 | if (options != MSG_EXT_PPR_DT) { |
438 | /* shouldn't happen */ |
439 | printf("%s: ppr negotiation for target %d: " |
440 | "no DT option\n", device_xname(sc->sc_dev), target); |
441 | siop_target->status = TARST_ASYNC; |
442 | siop_target->flags &= ~(TARF_DT | TARF_ISDT); |
443 | siop_target->offset = 0; |
444 | siop_target->period = 0; |
445 | goto reject; |
446 | } |
447 | |
448 | if (offset > sc->maxoff || sync < sc->dt_minsync || |
449 | sync > sc->dt_maxsync) { |
450 | printf("%s: ppr negotiation for target %d: " |
451 | "offset (%d) or sync (%d) out of range\n", |
452 | device_xname(sc->sc_dev), target, offset, sync); |
453 | /* should not happen */ |
454 | siop_target->offset = 0; |
455 | siop_target->period = 0; |
456 | goto reject; |
457 | } else { |
458 | for (i = 0; i < __arraycount(dt_scf_period); i++) { |
459 | if (sc->clock_period != dt_scf_period[i].clock) |
460 | continue; |
461 | if (dt_scf_period[i].period == sync) { |
462 | /* ok, found it. we are now in sync. */ |
463 | siop_target->offset = offset; |
464 | siop_target->period = sync; |
465 | scf = dt_scf_period[i].scf; |
466 | siop_target->flags |= TARF_ISDT; |
467 | } |
468 | } |
469 | if ((siop_target->flags & TARF_ISDT) == 0) { |
470 | printf("%s: ppr negotiation for target %d: " |
471 | "sync (%d) incompatible with adapter\n", |
472 | device_xname(sc->sc_dev), target, sync); |
473 | /* |
474 | * we didn't find it in our table, do async |
475 | * send reject msg, start SDTR/WDTR neg |
476 | */ |
477 | siop_target->status = TARST_ASYNC; |
478 | siop_target->flags &= ~(TARF_DT | TARF_ISDT); |
479 | siop_target->offset = 0; |
480 | siop_target->period = 0; |
481 | goto reject; |
482 | } |
483 | } |
484 | if (tables->msg_in[6] != 1) { |
485 | printf("%s: ppr negotiation for target %d: " |
486 | "transfer width (%d) incompatible with dt\n", |
487 | device_xname(sc->sc_dev), |
488 | target, tables->msg_in[6]); |
489 | /* DT mode can only be done with wide transfers */ |
490 | siop_target->status = TARST_ASYNC; |
491 | goto reject; |
492 | } |
493 | siop_target->flags |= TARF_ISWIDE; |
494 | sc->targets[target]->id |= (SCNTL3_EWS << 24); |
495 | sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24); |
496 | sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT); |
497 | sc->targets[target]->id &= ~(SXFER_MO_MASK << 8); |
498 | sc->targets[target]->id |= |
499 | (siop_target->offset & SXFER_MO_MASK) << 8; |
500 | sc->targets[target]->id &= ~0xff; |
501 | sc->targets[target]->id |= SCNTL4_U3EN; |
502 | siop_target->status = TARST_OK; |
503 | siop_update_xfer_mode(sc, target); |
504 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, |
505 | (sc->targets[target]->id >> 24) & 0xff); |
506 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, |
507 | (sc->targets[target]->id >> 8) & 0xff); |
508 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, |
509 | sc->targets[target]->id & 0xff); |
510 | return SIOP_NEG_ACK; |
511 | } else { |
512 | /* target initiated PPR negotiation, shouldn't happen */ |
513 | printf("%s: rejecting invalid PPR negotiation from " |
514 | "target %d\n", device_xname(sc->sc_dev), target); |
515 | reject: |
516 | tables->t_msgout.count = siop_htoc32(sc, 1); |
517 | tables->msg_out[0] = MSG_MESSAGE_REJECT; |
518 | return SIOP_NEG_MSGOUT; |
519 | } |
520 | } |
521 | |
522 | int |
523 | siop_sdtr_neg(struct siop_common_cmd *siop_cmd) |
524 | { |
525 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
526 | struct siop_common_target *siop_target = siop_cmd->siop_target; |
527 | int target = siop_cmd->xs->xs_periph->periph_target; |
528 | int sync, maxoffset, offset, i; |
529 | int send_msgout = 0; |
530 | struct siop_common_xfer *tables = siop_cmd->siop_tables; |
531 | |
532 | /* limit to Ultra/2 parameters, need PPR for Ultra/3 */ |
533 | maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff; |
534 | |
535 | sync = tables->msg_in[3]; |
536 | offset = tables->msg_in[4]; |
537 | |
538 | if (siop_target->status == TARST_SYNC_NEG) { |
539 | /* we initiated sync negotiation */ |
540 | siop_target->status = TARST_OK; |
541 | #ifdef DEBUG |
542 | printf("sdtr: sync %d offset %d\n", sync, offset); |
543 | #endif |
544 | if (offset > maxoffset || sync < sc->st_minsync || |
545 | sync > sc->st_maxsync) |
546 | goto reject; |
547 | for (i = 0; i < __arraycount(scf_period); i++) { |
548 | if (sc->clock_period != scf_period[i].clock) |
549 | continue; |
550 | if (scf_period[i].period == sync) { |
551 | /* ok, found it. we are now in sync. */ |
552 | siop_target->offset = offset; |
553 | siop_target->period = sync; |
554 | sc->targets[target]->id &= |
555 | ~(SCNTL3_SCF_MASK << 24); |
556 | sc->targets[target]->id |= scf_period[i].scf |
557 | << (24 + SCNTL3_SCF_SHIFT); |
558 | if (sync < 25 && /* Ultra */ |
559 | (sc->features & SF_BUS_ULTRA3) == 0) |
560 | sc->targets[target]->id |= |
561 | SCNTL3_ULTRA << 24; |
562 | else |
563 | sc->targets[target]->id &= |
564 | ~(SCNTL3_ULTRA << 24); |
565 | sc->targets[target]->id &= |
566 | ~(SXFER_MO_MASK << 8); |
567 | sc->targets[target]->id |= |
568 | (offset & SXFER_MO_MASK) << 8; |
569 | sc->targets[target]->id &= ~0xff; /* scntl4 */ |
570 | goto end; |
571 | } |
572 | } |
573 | /* |
574 | * we didn't find it in our table, do async and send reject |
575 | * msg |
576 | */ |
577 | reject: |
578 | send_msgout = 1; |
579 | tables->t_msgout.count = siop_htoc32(sc, 1); |
580 | tables->msg_out[0] = MSG_MESSAGE_REJECT; |
581 | sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24); |
582 | sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24); |
583 | sc->targets[target]->id &= ~(SXFER_MO_MASK << 8); |
584 | sc->targets[target]->id &= ~0xff; /* scntl4 */ |
585 | siop_target->offset = siop_target->period = 0; |
586 | } else { /* target initiated sync neg */ |
587 | #ifdef DEBUG |
588 | printf("sdtr (target): sync %d offset %d\n", sync, offset); |
589 | #endif |
590 | if (offset == 0 || sync > sc->st_maxsync) { /* async */ |
591 | goto async; |
592 | } |
593 | if (offset > maxoffset) |
594 | offset = maxoffset; |
595 | if (sync < sc->st_minsync) |
596 | sync = sc->st_minsync; |
597 | /* look for sync period */ |
598 | for (i = 0; i < __arraycount(scf_period); i++) { |
599 | if (sc->clock_period != scf_period[i].clock) |
600 | continue; |
601 | if (scf_period[i].period == sync) { |
602 | /* ok, found it. we are now in sync. */ |
603 | siop_target->offset = offset; |
604 | siop_target->period = sync; |
605 | sc->targets[target]->id &= |
606 | ~(SCNTL3_SCF_MASK << 24); |
607 | sc->targets[target]->id |= scf_period[i].scf |
608 | << (24 + SCNTL3_SCF_SHIFT); |
609 | if (sync < 25 && /* Ultra */ |
610 | (sc->features & SF_BUS_ULTRA3) == 0) |
611 | sc->targets[target]->id |= |
612 | SCNTL3_ULTRA << 24; |
613 | else |
614 | sc->targets[target]->id &= |
615 | ~(SCNTL3_ULTRA << 24); |
616 | sc->targets[target]->id &= |
617 | ~(SXFER_MO_MASK << 8); |
618 | sc->targets[target]->id |= |
619 | (offset & SXFER_MO_MASK) << 8; |
620 | sc->targets[target]->id &= ~0xff; /* scntl4 */ |
621 | siop_sdtr_msg(siop_cmd, 0, sync, offset); |
622 | send_msgout = 1; |
623 | goto end; |
624 | } |
625 | } |
626 | async: |
627 | siop_target->offset = siop_target->period = 0; |
628 | sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24); |
629 | sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24); |
630 | sc->targets[target]->id &= ~(SXFER_MO_MASK << 8); |
631 | sc->targets[target]->id &= ~0xff; /* scntl4 */ |
632 | siop_sdtr_msg(siop_cmd, 0, 0, 0); |
633 | send_msgout = 1; |
634 | } |
635 | end: |
636 | if (siop_target->status == TARST_OK) |
637 | siop_update_xfer_mode(sc, target); |
638 | #ifdef DEBUG |
639 | printf("id now 0x%x\n", sc->targets[target]->id); |
640 | #endif |
641 | tables->id = siop_htoc32(sc, sc->targets[target]->id); |
642 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, |
643 | (sc->targets[target]->id >> 24) & 0xff); |
644 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, |
645 | (sc->targets[target]->id >> 8) & 0xff); |
646 | if (send_msgout) { |
647 | return SIOP_NEG_MSGOUT; |
648 | } else { |
649 | return SIOP_NEG_ACK; |
650 | } |
651 | } |
652 | |
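| /* build an extended SDTR message at the given offset in the msg_out buffer */ |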
653 | void |
654 | siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff) |
655 | { |
656 | |
657 | siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED; |
658 | siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN; |
659 | siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR; |
660 | siop_cmd->siop_tables->msg_out[offset + 3] = ssync; |
661 | siop_cmd->siop_tables->msg_out[offset + 4] = soff; |
662 | siop_cmd->siop_tables->t_msgout.count = |
663 | siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2); |
664 | } |
665 | |
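| /* build an extended WDTR message at the given offset in the msg_out buffer */ |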
666 | void |
667 | siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide) |
668 | { |
669 | |
670 | siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED; |
671 | siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN; |
672 | siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR; |
673 | siop_cmd->siop_tables->msg_out[offset + 3] = wide; |
674 | siop_cmd->siop_tables->t_msgout.count = |
675 | siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2); |
676 | } |
677 | |
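| /* build an extended PPR message (wide + DT) at the given offset in the msg_out buffer */ |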
678 | void |
679 | siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff) |
680 | { |
681 | |
682 | siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED; |
683 | siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN; |
684 | siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR; |
685 | siop_cmd->siop_tables->msg_out[offset + 3] = ssync; |
686 | siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */ |
687 | siop_cmd->siop_tables->msg_out[offset + 5] = soff; |
688 | siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */ |
689 | siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT; |
690 | siop_cmd->siop_tables->t_msgout.count = |
691 | siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2); |
692 | } |
693 | |
694 | void |
695 | siop_minphys(struct buf *bp) |
696 | { |
697 | |
698 | minphys(bp); |
699 | } |
700 | |
701 | int |
702 | siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg, |
703 | int flag, struct proc *p) |
704 | { |
705 | struct siop_common_softc *sc; |
706 | |
707 | sc = device_private(chan->chan_adapter->adapt_dev); |
708 | |
709 | switch (cmd) { |
710 | case SCBUSIORESET: |
711 | /* |
712 | * abort the script. This will trigger an interrupt, which will |
713 | * trigger a bus reset. |
714 | * We can't safely trigger the reset here as we can't access |
715 | * the required register while the script is running. |
716 | */ |
717 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT); |
718 | return (0); |
719 | default: |
720 | return (ENOTTY); |
721 | } |
722 | } |
723 | |
724 | void |
725 | siop_ma(struct siop_common_cmd *siop_cmd) |
726 | { |
727 | int offset, dbc, sstat; |
728 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
729 | #ifdef DEBUG_DR |
730 | scr_table_t *table; /* table with partial xfer */ |
731 | #endif |
732 | |
733 | /* |
734 | * compute how much of the current table didn't get handled when |
735 | * a phase mismatch occurs |
736 | */ |
737 | if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN)) |
738 | == 0) |
739 | return; /* no valid data transfer */ |
740 | |
741 | offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1); |
742 | if (offset >= SIOP_NSG) { |
743 | aprint_error_dev(sc->sc_dev, "bad offset in siop_sdp (%d)\n", |
744 | offset); |
745 | return; |
746 | } |
747 | #ifdef DEBUG_DR |
748 | table = &siop_cmd->siop_tables->data[offset]; |
749 | printf("siop_ma: offset %d count=%d addr=0x%x ", offset, |
750 | table->count, table->addr); |
751 | #endif |
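| /* DBC holds the byte count remaining in the interrupted block move (low 24 bits) */ |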
752 | dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff; |
753 | if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) { |
754 | if (sc->features & SF_CHIP_DFBC) { |
755 | dbc += |
756 | bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC); |
757 | } else { |
758 | /* need to account for stale data in the FIFO */ |
759 | int dfifo = |
760 | bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO); |
761 | if (sc->features & SF_CHIP_FIFO) { |
762 | dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh, |
763 | SIOP_CTEST5) & CTEST5_BOMASK) << 8; |
764 | dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff; |
765 | } else { |
766 | dbc += (dfifo - (dbc & 0x7f)) & 0x7f; |
767 | } |
768 | } |
769 | sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0); |
770 | if (sstat & SSTAT0_OLF) |
771 | dbc++; |
772 | if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0) |
773 | dbc++; |
774 | if (siop_cmd->siop_target->flags & TARF_ISWIDE) { |
775 | sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, |
776 | SIOP_SSTAT2); |
777 | if (sstat & SSTAT2_OLF1) |
778 | dbc++; |
779 | if ((sstat & SSTAT2_ORF1) && |
780 | (sc->features & SF_CHIP_DFBC) == 0) |
781 | dbc++; |
782 | } |
783 | /* clear the FIFO */ |
784 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3, |
785 | bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) | |
786 | CTEST3_CLF); |
787 | } |
788 | siop_cmd->flags |= CMDFL_RESID; |
789 | siop_cmd->resid = dbc; |
790 | } |
791 | |
792 | void |
793 | siop_sdp(struct siop_common_cmd *siop_cmd, int offset) |
794 | { |
795 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
796 | scr_table_t *table; |
797 | |
798 | if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN)) |
799 | == 0) |
800 | return; /* no data pointers to save */ |
801 | |
802 | /* |
803 | * offset == SIOP_NSG may be a valid condition if we get a Save data |
804 | * pointer when the xfer is done. Just ignore the Save data pointer |
805 | * in this case |
806 | */ |
807 | if (offset == SIOP_NSG) |
808 | return; |
809 | #ifdef DIAGNOSTIC |
810 | if (offset > SIOP_NSG) { |
811 | scsipi_printaddr(siop_cmd->xs->xs_periph); |
812 | printf(": offset %d > %d\n", offset, SIOP_NSG); |
813 | panic("siop_sdp: offset"); |
814 | } |
815 | #endif |
816 | /* |
817 | * Save data pointer. We do this by adjusting the tables to point |
818 | * at the beginning of the data not yet transferred. |
819 | * offset points to the first table with untransferred data. |
820 | */ |
821 | |
822 | /* |
823 | * before doing that we decrease resid by the amount of data which |
824 | * has already been transferred. |
825 | */ |
826 | siop_update_resid(siop_cmd, offset); |
827 | |
828 | /* |
829 | * First let's see if we have a resid from a phase mismatch. If so, |
830 | * we have to adjust the table at offset to remove transferred data. |
831 | */ |
832 | if (siop_cmd->flags & CMDFL_RESID) { |
833 | siop_cmd->flags &= ~CMDFL_RESID; |
834 | table = &siop_cmd->siop_tables->data[offset]; |
835 | /* "cut" already transferred data from this table */ |
836 | table->addr = |
837 | siop_htoc32(sc, siop_ctoh32(sc, table->addr) + |
838 | siop_ctoh32(sc, table->count) - siop_cmd->resid); |
839 | table->count = siop_htoc32(sc, siop_cmd->resid); |
840 | } |
841 | |
842 | /* |
843 | * now we can remove entries which have been transferred. |
844 | * We just move the entries with data left to the beginning of the |
845 | * tables |
846 | */ |
847 | memmove(&siop_cmd->siop_tables->data[0], |
848 | &siop_cmd->siop_tables->data[offset], |
849 | (SIOP_NSG - offset) * sizeof(scr_table_t)); |
850 | } |
851 | |
852 | void |
853 | siop_update_resid(struct siop_common_cmd *siop_cmd, int offset) |
854 | { |
855 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
856 | scr_table_t *table; |
857 | int i; |
858 | |
859 | if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN)) |
860 | == 0) |
861 | return; /* no data to transfer */ |
862 | |
863 | /* |
864 | * update resid. First account for the table entries which have |
865 | * been fully completed. |
866 | */ |
867 | for (i = 0; i < offset; i++) |
868 | siop_cmd->xs->resid -= |
869 | siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count); |
870 | /* |
871 | * if CMDFL_RESID is set, the last table (pointed to by offset) is a |
872 | * partial transfer. If not, offset points to the entry following |
873 | * the last full transfer. |
874 | */ |
875 | if (siop_cmd->flags & CMDFL_RESID) { |
876 | table = &siop_cmd->siop_tables->data[offset]; |
877 | siop_cmd->xs->resid -= |
878 | siop_ctoh32(sc, table->count) - siop_cmd->resid; |
879 | } |
880 | } |
881 | |
882 | int |
883 | siop_iwr(struct siop_common_cmd *siop_cmd) |
884 | { |
885 | int offset; |
886 | scr_table_t *table; /* table with IWR */ |
887 | struct siop_common_softc *sc = siop_cmd->siop_sc; |
888 | |
889 | /* handle ignore wide residue messages */ |
890 | |
891 | /* if target isn't wide, reject */ |
892 | if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) { |
893 | siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1); |
894 | siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT; |
895 | return SIOP_NEG_MSGOUT; |
896 | } |
897 | /* get index of current command in table */ |
898 | offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1); |
899 | /* |
900 | * if the current table did complete, we're now pointing at the |
901 | * next one. Go back one if we didn't see a phase mismatch. |
902 | */ |
903 | if ((siop_cmd->flags & CMDFL_RESID) == 0) |
904 | offset--; |
905 | table = &siop_cmd->siop_tables->data[offset]; |
906 | |
907 | if ((siop_cmd->flags & CMDFL_RESID) == 0) { |
908 | if (siop_ctoh32(sc, table->count) & 1) { |
909 | /* we really got the number of bytes we expected */ |
910 | return SIOP_NEG_ACK; |
911 | } else { |
912 | /* |
913 | * now we really had a short xfer, by one byte. |
914 | * handle it just as if we had a phase mismatch |
915 | * (there is a resid of one for this table). |
916 | * Update scratcha1 to reflect the fact that |
917 | * this xfer isn't complete. |
918 | */ |
919 | siop_cmd->flags |= CMDFL_RESID; |
920 | siop_cmd->resid = 1; |
921 | bus_space_write_1(sc->sc_rt, sc->sc_rh, |
922 | SIOP_SCRATCHA + 1, offset); |
923 | return SIOP_NEG_ACK; |
924 | } |
925 | } else { |
926 | /* |
927 | * we already have a short xfer for this table; it's |
928 | * just one byte less than we thought it was |
929 | */ |
930 | siop_cmd->resid--; |
931 | return SIOP_NEG_ACK; |
932 | } |
933 | } |
934 | |
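| /* flush the DMA FIFO: set CTEST3_CLF and wait for the chip to clear it again */ |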
935 | void |
936 | siop_clearfifo(struct siop_common_softc *sc) |
937 | { |
938 | int timeout = 0; |
939 | int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3); |
940 | |
941 | #ifdef DEBUG_INTR |
942 | printf("DMA fifo not empty !\n"); |
943 | #endif |
944 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3, |
945 | ctest3 | CTEST3_CLF); |
946 | while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) & |
947 | CTEST3_CLF) != 0) { |
948 | delay(1); |
949 | if (++timeout > 1000) { |
950 | printf("clear fifo failed\n"); |
951 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3, |
952 | bus_space_read_1(sc->sc_rt, sc->sc_rh, |
953 | SIOP_CTEST3) & ~CTEST3_CLF); |
954 | return; |
955 | } |
956 | } |
957 | } |
958 | |
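| /* wait for DIFFSENSE to stabilise, then reprogram STEST2 for the new bus mode (SE, LVD or differential) */ |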
959 | int |
960 | siop_modechange(struct siop_common_softc *sc) |
961 | { |
962 | int retry; |
963 | int sist1, stest2; |
964 | |
965 | for (retry = 0; retry < 5; retry++) { |
966 | /* |
967 | * datasheet says to wait 100ms and re-read SIST1, |
968 | * to check that DIFFSENSE is stable. |
969 | * We may delay() 5 times for 100ms at interrupt time; |
970 | * hopefully this will not happen often. |
971 | */ |
972 | delay(100000); |
973 | (void)bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0); |
974 | sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1); |
975 | if (sist1 & SIEN1_SBMC) |
976 | continue; /* we got an irq again */ |
977 | sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) & |
978 | STEST4_MODE_MASK; |
979 | stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2); |
980 | switch(sc->mode) { |
981 | case STEST4_MODE_DIF: |
982 | printf("%s: switching to differential mode\n", |
983 | device_xname(sc->sc_dev)); |
984 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, |
985 | stest2 | STEST2_DIF); |
986 | break; |
987 | case STEST4_MODE_SE: |
988 | printf("%s: switching to single-ended mode\n", |
989 | device_xname(sc->sc_dev)); |
990 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, |
991 | stest2 & ~STEST2_DIF); |
992 | break; |
993 | case STEST4_MODE_LVD: |
994 | printf("%s: switching to LVD mode\n", |
995 | device_xname(sc->sc_dev)); |
996 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, |
997 | stest2 & ~STEST2_DIF); |
998 | break; |
999 | default: |
1000 | aprint_error_dev(sc->sc_dev, "invalid SCSI mode 0x%x\n", |
1001 | sc->mode); |
1002 | return 0; |
1003 | } |
1004 | return 1; |
1005 | } |
1006 | printf("%s: timeout waiting for DIFFSENSE to stabilise\n", |
1007 | device_xname(sc->sc_dev)); |
1008 | return 0; |
1009 | } |
1010 | |
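| /* assert the SCSI RST signal long enough to reset the bus, then release it */ |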
1011 | void |
1012 | siop_resetbus(struct siop_common_softc *sc) |
1013 | { |
1014 | int scntl1; |
1015 | |
1016 | scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1); |
1017 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, |
1018 | scntl1 | SCNTL1_RST); |
1019 | /* minimum 25 us, more time won't hurt */ |
1020 | delay(100); |
1021 | bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1); |
1022 | } |
1023 | |
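| /* report the negotiated transfer parameters (wide/sync/tag) for a target to the scsipi layer */ |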
1024 | void |
1025 | siop_update_xfer_mode(struct siop_common_softc *sc, int target) |
1026 | { |
1027 | struct siop_common_target *siop_target = sc->targets[target]; |
1028 | struct scsipi_xfer_mode xm; |
1029 | |
1030 | xm.xm_target = target; |
1031 | xm.xm_mode = 0; |
1032 | xm.xm_period = 0; |
1033 | xm.xm_offset = 0; |
1034 | |
1035 | if (siop_target->flags & TARF_ISWIDE) |
1036 | xm.xm_mode |= PERIPH_CAP_WIDE16; |
1037 | if (siop_target->period) { |
1038 | xm.xm_period = siop_target->period; |
1039 | xm.xm_offset = siop_target->offset; |
1040 | xm.xm_mode |= PERIPH_CAP_SYNC; |
1041 | } |
1042 | if (siop_target->flags & TARF_TAG) { |
1043 | /* 1010 workaround: can't do disconnect if not wide, so can't do tag */ |
1044 | if ((sc->features & SF_CHIP_GEBUG) == 0 || |
1045 | (sc->targets[target]->flags & TARF_ISWIDE)) |
1046 | xm.xm_mode |= PERIPH_CAP_TQING; |
1047 | } |
1048 | |
1049 | scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm); |
1050 | } |
1051 | |