1 | /* $NetBSD: mpt_netbsd.c,v 1.33 2016/05/02 19:18:29 christos Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
8 | * |
9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions |
11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior |
23 | * written permission. |
24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ |
37 | |
38 | /* |
39 | * Copyright (c) 2000, 2001 by Greg Ansley |
40 | * Partially derived from Matt Jacob's ISP driver. |
41 | * |
42 | * Redistribution and use in source and binary forms, with or without |
43 | * modification, are permitted provided that the following conditions |
44 | * are met: |
45 | * 1. Redistributions of source code must retain the above copyright |
46 | * notice immediately at the beginning of the file, without modification, |
47 | * this list of conditions, and the following disclaimer. |
48 | * 2. The name of the author may not be used to endorse or promote products |
49 | * derived from this software without specific prior written permission. |
50 | * |
51 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
52 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
53 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
54 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR |
55 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
56 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
57 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
58 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
59 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
60 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
61 | * SUCH DAMAGE. |
62 | */ |
63 | /* |
64 | * Additional Copyright (c) 2002 by Matthew Jacob under same license. |
65 | */ |
66 | |
67 | /* |
68 | * mpt_netbsd.c: |
69 | * |
70 | * NetBSD-specific routines for LSI Fusion adapters. Includes some |
71 | * bus_dma glue, and SCSIPI glue. |
72 | * |
73 | * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for |
74 | * Wasabi Systems, Inc. |
75 | * |
76 | * Additional contributions by Garrett D'Amore on behalf of TELES AG. |
77 | */ |
78 | |
79 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.33 2016/05/02 19:18:29 christos Exp $");
81 | |
82 | #include "bio.h" |
83 | |
84 | #include <dev/ic/mpt.h> /* pulls in all headers */ |
85 | #include <sys/scsiio.h> |
86 | |
87 | #if NBIO > 0 |
88 | #include <dev/biovar.h> |
89 | #endif |
90 | |
91 | static int mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int); |
92 | static void mpt_timeout(void *); |
93 | static void mpt_restart(mpt_softc_t *, request_t *); |
94 | static void mpt_done(mpt_softc_t *, uint32_t); |
95 | static int mpt_drain_queue(mpt_softc_t *); |
96 | static void mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *); |
97 | static void mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *); |
98 | static void mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *); |
99 | static void mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t); |
100 | static void mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *); |
101 | static void mpt_bus_reset(mpt_softc_t *); |
102 | |
103 | static void mpt_scsipi_request(struct scsipi_channel *, |
104 | scsipi_adapter_req_t, void *); |
105 | static void mpt_minphys(struct buf *); |
106 | static int mpt_ioctl(struct scsipi_channel *, u_long, void *, int, |
107 | struct proc *); |
108 | |
109 | #if NBIO > 0 |
110 | static bool mpt_is_raid(mpt_softc_t *); |
111 | static int mpt_bio_ioctl(device_t, u_long, void *); |
112 | static int mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *); |
113 | static int mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *); |
114 | static int mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *); |
115 | static int mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *); |
116 | static int mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *); |
117 | #endif |
118 | |
119 | void |
120 | mpt_scsipi_attach(mpt_softc_t *mpt) |
121 | { |
122 | struct scsipi_adapter *adapt = &mpt->sc_adapter; |
123 | struct scsipi_channel *chan = &mpt->sc_channel; |
124 | int maxq; |
125 | |
126 | mpt->bus = 0; /* XXX ?? */ |
127 | |
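	/*
	 * The IOC grants a limited number of request credits; never
	 * expose more openings than either the firmware credits or
	 * our own request pool can cover.
	 */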
128 | maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ? |
129 | mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt); |
130 | |
131 | /* Fill in the scsipi_adapter. */ |
132 | memset(adapt, 0, sizeof(*adapt)); |
133 | adapt->adapt_dev = mpt->sc_dev; |
134 | adapt->adapt_nchannels = 1; |
	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use. */
136 | adapt->adapt_max_periph = maxq - 2; |
137 | adapt->adapt_request = mpt_scsipi_request; |
138 | adapt->adapt_minphys = mpt_minphys; |
139 | adapt->adapt_ioctl = mpt_ioctl; |
140 | |
141 | /* Fill in the scsipi_channel. */ |
142 | memset(chan, 0, sizeof(*chan)); |
143 | chan->chan_adapter = adapt; |
144 | if (mpt->is_sas) { |
145 | chan->chan_bustype = &scsi_sas_bustype; |
146 | } else if (mpt->is_fc) { |
147 | chan->chan_bustype = &scsi_fc_bustype; |
148 | } else { |
149 | chan->chan_bustype = &scsi_bustype; |
150 | } |
151 | chan->chan_channel = 0; |
152 | chan->chan_flags = 0; |
153 | chan->chan_nluns = 8; |
154 | chan->chan_ntargets = mpt->mpt_max_devices; |
155 | chan->chan_id = mpt->mpt_ini_id; |
156 | |
157 | /* |
158 | * Save the output of the config so we can rescan the bus in case of |
159 | * errors |
160 | */ |
161 | mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel, |
162 | scsiprint); |
163 | |
164 | #if NBIO > 0 |
165 | if (mpt_is_raid(mpt)) { |
166 | if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0) |
			panic("%s: controller registration failed",
168 | device_xname(mpt->sc_dev)); |
169 | } |
170 | #endif |
171 | } |
172 | |
173 | int |
174 | mpt_dma_mem_alloc(mpt_softc_t *mpt) |
175 | { |
176 | bus_dma_segment_t reply_seg, request_seg; |
177 | int reply_rseg, request_rseg; |
178 | bus_addr_t pptr, end; |
179 | char *vptr; |
180 | size_t len; |
181 | int error, i; |
182 | |
183 | /* Check if we have already allocated the reply memory. */ |
184 | if (mpt->reply != NULL) |
185 | return (0); |
186 | |
187 | /* |
188 | * Allocate the request pool. This isn't really DMA'd memory, |
189 | * but it's a convenient place to do it. |
190 | */ |
191 | len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt); |
192 | mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); |
193 | if (mpt->request_pool == NULL) { |
		aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
195 | return (ENOMEM); |
196 | } |
197 | |
198 | /* |
199 | * Allocate DMA resources for reply buffers. |
200 | */ |
201 | error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, |
202 | &reply_seg, 1, &reply_rseg, 0); |
203 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
205 | error); |
206 | goto fail_0; |
207 | } |
208 | |
209 | error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE, |
210 | (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/); |
211 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
213 | error); |
214 | goto fail_1; |
215 | } |
216 | |
217 | error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, |
218 | 0, 0, &mpt->reply_dmap); |
219 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
221 | error); |
222 | goto fail_2; |
223 | } |
224 | |
225 | error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply, |
226 | PAGE_SIZE, NULL, 0); |
227 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
229 | error); |
230 | goto fail_3; |
231 | } |
232 | mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr; |
233 | |
234 | /* |
235 | * Allocate DMA resources for request buffers. |
236 | */ |
237 | error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), |
238 | PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0); |
239 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
242 | goto fail_4; |
243 | } |
244 | |
245 | error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg, |
246 | MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0); |
247 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
249 | error); |
250 | goto fail_5; |
251 | } |
252 | |
253 | error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1, |
254 | MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap); |
255 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
258 | goto fail_6; |
259 | } |
260 | |
261 | error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request, |
262 | MPT_REQ_MEM_SIZE(mpt), NULL, 0); |
263 | if (error) { |
		aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
265 | error); |
266 | goto fail_7; |
267 | } |
268 | mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr; |
269 | |
270 | pptr = mpt->request_phys; |
271 | vptr = (void *) mpt->request; |
272 | end = pptr + MPT_REQ_MEM_SIZE(mpt); |
273 | |
274 | for (i = 0; pptr < end; i++) { |
275 | request_t *req = &mpt->request_pool[i]; |
276 | req->index = i; |
277 | |
278 | /* Store location of Request Data */ |
279 | req->req_pbuf = pptr; |
280 | req->req_vbuf = vptr; |
281 | |
282 | pptr += MPT_REQUEST_AREA; |
283 | vptr += MPT_REQUEST_AREA; |
284 | |
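		/*
		 * The sense buffer lives at the tail end of each
		 * request's slice of the request area.
		 */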
285 | req->sense_pbuf = (pptr - MPT_SENSE_SIZE); |
286 | req->sense_vbuf = (vptr - MPT_SENSE_SIZE); |
287 | |
288 | error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS, |
289 | MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap); |
290 | if (error) { |
			aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
293 | goto fail_8; |
294 | } |
295 | } |
296 | |
297 | return (0); |
298 | |
299 | fail_8: |
300 | for (--i; i >= 0; i--) { |
301 | request_t *req = &mpt->request_pool[i]; |
302 | if (req->dmap != NULL) |
303 | bus_dmamap_destroy(mpt->sc_dmat, req->dmap); |
304 | } |
305 | bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap); |
306 | fail_7: |
307 | bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap); |
308 | fail_6: |
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
310 | fail_5: |
311 | bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg); |
312 | fail_4: |
313 | bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap); |
314 | fail_3: |
315 | bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap); |
316 | fail_2: |
317 | bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE); |
318 | fail_1: |
319 | bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg); |
320 | fail_0: |
321 | free(mpt->request_pool, M_DEVBUF); |
322 | |
323 | mpt->reply = NULL; |
324 | mpt->request = NULL; |
325 | mpt->request_pool = NULL; |
326 | |
327 | return (error); |
328 | } |
329 | |
330 | int |
331 | mpt_intr(void *arg) |
332 | { |
333 | mpt_softc_t *mpt = arg; |
334 | int nrepl = 0; |
335 | |
336 | if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0) |
337 | return (0); |
338 | |
339 | nrepl = mpt_drain_queue(mpt); |
340 | return (nrepl != 0); |
341 | } |
342 | |
343 | void |
344 | mpt_prt(mpt_softc_t *mpt, const char *fmt, ...) |
345 | { |
346 | va_list ap; |
347 | |
	printf("%s: ", device_xname(mpt->sc_dev));
349 | va_start(ap, fmt); |
350 | vprintf(fmt, ap); |
351 | va_end(ap); |
	printf("\n");
353 | } |
354 | |
355 | static int |
356 | mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count) |
357 | { |
358 | |
359 | /* Timeouts are in msec, so we loop in 1000usec cycles */ |
360 | while (count) { |
361 | mpt_intr(mpt); |
362 | if (xs->xs_status & XS_STS_DONE) |
363 | return (0); |
364 | delay(1000); /* only happens in boot, so ok */ |
365 | count--; |
366 | } |
367 | return (1); |
368 | } |
369 | |
370 | static void |
371 | mpt_timeout(void *arg) |
372 | { |
373 | request_t *req = arg; |
374 | struct scsipi_xfer *xs; |
375 | struct scsipi_periph *periph; |
376 | mpt_softc_t *mpt; |
377 | uint32_t oseq; |
378 | int s, nrepl = 0; |
379 | |
380 | if (req->xfer == NULL) { |
		printf("mpt_timeout: NULL xfer for request index 0x%x, sequence 0x%x\n",
		    req->index, req->sequence);
383 | return; |
384 | } |
385 | xs = req->xfer; |
386 | periph = xs->xs_periph; |
387 | mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev); |
388 | scsipi_printaddr(periph); |
	printf("command timeout\n");
390 | |
391 | s = splbio(); |
392 | |
393 | oseq = req->sequence; |
394 | mpt->timeouts++; |
395 | if (mpt_intr(mpt)) { |
396 | if (req->sequence != oseq) { |
397 | mpt->success++; |
			mpt_prt(mpt, "recovered from command timeout");
399 | splx(s); |
400 | return; |
401 | } |
402 | } |
403 | |
404 | /* |
405 | * Ensure the IOC is really done giving us data since it appears it can |
406 | * sometimes fail to give us interrupts under heavy load. |
407 | */ |
408 | nrepl = mpt_drain_queue(mpt); |
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
411 | } |
412 | |
413 | if (req->sequence != oseq) { |
414 | mpt->success++; |
415 | splx(s); |
416 | return; |
417 | } |
418 | |
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
422 | mpt_check_doorbell(mpt); |
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
424 | mpt_read(mpt, MPT_OFFSET_INTR_STATUS), |
425 | mpt_read(mpt, MPT_OFFSET_INTR_MASK), |
426 | mpt_read(mpt, MPT_OFFSET_DOORBELL)); |
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
428 | if (mpt->verbose > 1) |
429 | mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf); |
430 | |
431 | xs->error = XS_TIMEOUT; |
432 | splx(s); |
433 | mpt_restart(mpt, req); |
434 | } |
435 | |
436 | static void |
437 | mpt_restart(mpt_softc_t *mpt, request_t *req0) |
438 | { |
439 | int i, s, nreq; |
440 | request_t *req; |
441 | struct scsipi_xfer *xs; |
442 | |
443 | /* first, reset the IOC, leaving stopped so all requests are idle */ |
444 | if (mpt_soft_reset(mpt) != MPT_OK) { |
		mpt_prt(mpt, "soft reset failed");
446 | /* |
447 | * Don't try a hard reset since this mangles the PCI |
448 | * configuration registers. |
449 | */ |
450 | return; |
451 | } |
452 | |
453 | /* Freeze the channel so scsipi doesn't queue more commands. */ |
454 | scsipi_channel_freeze(&mpt->sc_channel, 1); |
455 | |
456 | /* Return all pending requests to scsipi and de-allocate them. */ |
457 | s = splbio(); |
458 | nreq = 0; |
459 | for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) { |
460 | req = &mpt->request_pool[i]; |
461 | xs = req->xfer; |
462 | if (xs != NULL) { |
463 | if (xs->datalen != 0) |
464 | bus_dmamap_unload(mpt->sc_dmat, req->dmap); |
465 | req->xfer = NULL; |
466 | callout_stop(&xs->xs_callout); |
467 | if (req != req0) { |
468 | nreq++; |
469 | xs->error = XS_REQUEUE; |
470 | } |
471 | scsipi_done(xs); |
472 | /* |
473 | * Don't need to mpt_free_request() since mpt_init() |
474 | * below will free all requests anyway. |
475 | */ |
476 | mpt_free_request(mpt, req); |
477 | } |
478 | } |
479 | splx(s); |
480 | if (nreq > 0) |
		mpt_prt(mpt, "re-queued %d requests", nreq);
482 | |
483 | /* Re-initialize the IOC (which restarts it). */ |
484 | if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0) |
		mpt_prt(mpt, "restart succeeded");
486 | /* else error message already printed */ |
487 | |
488 | /* Thaw the channel, causing scsipi to re-queue the commands. */ |
489 | scsipi_channel_thaw(&mpt->sc_channel, 1); |
490 | } |
491 | |
492 | static int |
493 | mpt_drain_queue(mpt_softc_t *mpt) |
494 | { |
495 | int nrepl = 0; |
496 | uint32_t reply; |
497 | |
498 | reply = mpt_pop_reply_queue(mpt); |
499 | while (reply != MPT_REPLY_EMPTY) { |
500 | nrepl++; |
501 | if (mpt->verbose > 1) { |
502 | if ((reply & MPT_CONTEXT_REPLY) != 0) { |
503 | /* Address reply; IOC has something to say */ |
504 | mpt_print_reply(MPT_REPLY_PTOV(mpt, reply)); |
505 | } else { |
506 | /* Context reply; all went well */ |
				mpt_prt(mpt, "context %u reply OK", reply);
508 | } |
509 | } |
510 | mpt_done(mpt, reply); |
511 | reply = mpt_pop_reply_queue(mpt); |
512 | } |
513 | return (nrepl); |
514 | } |
515 | |
516 | static void |
517 | mpt_done(mpt_softc_t *mpt, uint32_t reply) |
518 | { |
519 | struct scsipi_xfer *xs = NULL; |
520 | struct scsipi_periph *periph; |
521 | int index; |
522 | request_t *req; |
523 | MSG_REQUEST_HEADER *mpt_req; |
524 | MSG_SCSI_IO_REPLY *mpt_reply; |
525 | int restart = 0; /* nonzero if we need to restart the IOC*/ |
526 | |
527 | if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) { |
528 | /* context reply (ok) */ |
529 | mpt_reply = NULL; |
530 | index = reply & MPT_CONTEXT_MASK; |
531 | } else { |
532 | /* address reply (error) */ |
533 | |
534 | /* XXX BUS_DMASYNC_POSTREAD XXX */ |
535 | mpt_reply = MPT_REPLY_PTOV(mpt, reply); |
536 | if (mpt_reply != NULL) { |
537 | if (mpt->verbose > 1) { |
538 | uint32_t *pReply = (uint32_t *) mpt_reply; |
539 | |
				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
548 | } |
549 | index = le32toh(mpt_reply->MsgContext); |
550 | } else |
551 | index = reply & MPT_CONTEXT_MASK; |
552 | } |
553 | |
554 | /* |
555 | * Address reply with MessageContext high bit set. |
556 | * This is most likely a notify message, so we try |
557 | * to process it, then free it. |
558 | */ |
559 | if (__predict_false((index & 0x80000000) != 0)) { |
560 | if (mpt_reply != NULL) |
561 | mpt_ctlop(mpt, mpt_reply, reply); |
562 | else |
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
565 | return; |
566 | } |
567 | |
568 | /* Did we end up with a valid index into the table? */ |
569 | if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) { |
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
572 | return; |
573 | } |
574 | |
575 | req = &mpt->request_pool[index]; |
576 | |
577 | /* Make sure memory hasn't been trashed. */ |
578 | if (__predict_false(req->index != index)) { |
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
581 | return; |
582 | } |
583 | |
584 | MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
585 | mpt_req = req->req_vbuf; |
586 | |
587 | /* Short cut for task management replies; nothing more for us to do. */ |
588 | if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) { |
589 | if (mpt->verbose > 1) |
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
591 | KASSERT(req == mpt->mngt_req); |
592 | mpt->mngt_req = NULL; |
593 | goto done; |
594 | } |
595 | |
596 | if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE)) |
597 | goto done; |
598 | |
599 | /* |
600 | * At this point, it had better be a SCSI I/O command, but don't |
601 | * crash if it isn't. |
602 | */ |
603 | if (__predict_false(mpt_req->Function != |
604 | MPI_FUNCTION_SCSI_IO_REQUEST)) { |
605 | if (mpt->verbose > 1) |
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
608 | goto done; |
609 | } |
610 | |
611 | /* Recover scsipi_xfer from the request structure. */ |
612 | xs = req->xfer; |
613 | |
614 | /* Can't have a SCSI command without a scsipi_xfer. */ |
615 | if (__predict_false(xs == NULL)) { |
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
621 | mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf); |
622 | |
623 | if (mpt_reply != NULL) { |
			mpt_prt(mpt, "mpt_reply:");
625 | mpt_print_reply(mpt_reply); |
626 | } else { |
			mpt_prt(mpt, "context reply: 0x%08x", reply);
628 | } |
629 | goto done; |
630 | } |
631 | |
632 | callout_stop(&xs->xs_callout); |
633 | |
634 | periph = xs->xs_periph; |
635 | |
636 | /* |
637 | * If we were a data transfer, unload the map that described |
638 | * the data buffer. |
639 | */ |
640 | if (__predict_true(xs->datalen != 0)) { |
641 | bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0, |
642 | req->dmap->dm_mapsize, |
643 | (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD |
644 | : BUS_DMASYNC_POSTWRITE); |
645 | bus_dmamap_unload(mpt->sc_dmat, req->dmap); |
646 | } |
647 | |
648 | if (__predict_true(mpt_reply == NULL)) { |
649 | /* |
650 | * Context reply; report that the command was |
651 | * successful! |
652 | * |
653 | * Also report the xfer mode, if necessary. |
654 | */ |
655 | if (__predict_false(mpt->mpt_report_xfer_mode != 0)) { |
656 | if ((mpt->mpt_report_xfer_mode & |
657 | (1 << periph->periph_target)) != 0) |
658 | mpt_get_xfer_mode(mpt, periph); |
659 | } |
660 | xs->error = XS_NOERROR; |
661 | xs->status = SCSI_OK; |
662 | xs->resid = 0; |
663 | mpt_free_request(mpt, req); |
664 | scsipi_done(xs); |
665 | return; |
666 | } |
667 | |
668 | xs->status = mpt_reply->SCSIStatus; |
669 | switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) { |
670 | case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: |
671 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
673 | break; |
674 | |
675 | case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: |
676 | /* |
677 | * Yikes! Tagged queue full comes through this path! |
678 | * |
679 | * So we'll change it to a status error and anything |
680 | * that returns status should probably be a status |
681 | * error as well. |
682 | */ |
683 | xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount); |
684 | if (mpt_reply->SCSIState & |
685 | MPI_SCSI_STATE_NO_SCSI_STATUS) { |
686 | xs->error = XS_DRIVER_STUFFUP; |
687 | break; |
688 | } |
689 | /* FALLTHROUGH */ |
690 | case MPI_IOCSTATUS_SUCCESS: |
691 | case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: |
692 | switch (xs->status) { |
693 | case SCSI_OK: |
694 | /* Report the xfer mode, if necessary. */ |
695 | if ((mpt->mpt_report_xfer_mode & |
696 | (1 << periph->periph_target)) != 0) |
697 | mpt_get_xfer_mode(mpt, periph); |
698 | xs->resid = 0; |
699 | break; |
700 | |
701 | case SCSI_CHECK: |
702 | xs->error = XS_SENSE; |
703 | break; |
704 | |
705 | case SCSI_BUSY: |
706 | case SCSI_QUEUE_FULL: |
707 | xs->error = XS_BUSY; |
708 | break; |
709 | |
710 | default: |
711 | scsipi_printaddr(periph); |
			printf("invalid status code %d\n", xs->status);
713 | xs->error = XS_DRIVER_STUFFUP; |
714 | break; |
715 | } |
716 | break; |
717 | |
718 | case MPI_IOCSTATUS_BUSY: |
719 | case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: |
720 | xs->error = XS_RESOURCE_SHORTAGE; |
721 | break; |
722 | |
723 | case MPI_IOCSTATUS_SCSI_INVALID_BUS: |
724 | case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: |
725 | case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: |
726 | xs->error = XS_SELTIMEOUT; |
727 | break; |
728 | |
729 | case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: |
730 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
732 | restart = 1; |
733 | break; |
734 | |
735 | case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: |
736 | /* XXX What should we do here? */ |
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
738 | restart = 1; |
739 | break; |
740 | |
741 | case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: |
742 | /* XXX */ |
743 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
745 | restart = 1; |
746 | break; |
747 | |
748 | case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: |
749 | /* XXX */ |
750 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
752 | restart = 1; |
753 | break; |
754 | |
755 | case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: |
756 | /* XXX This is a bus-reset */ |
757 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
759 | restart = 1; |
760 | break; |
761 | |
762 | case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: |
763 | /* |
764 | * FreeBSD and Linux indicate this is a phase error between |
765 | * the IOC and the drive itself. When this happens, the IOC |
766 | * becomes unhappy and stops processing all transactions. |
767 | * Call mpt_timeout which knows how to get the IOC back |
768 | * on its feet. |
769 | */ |
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
772 | xs->error = XS_TIMEOUT; |
773 | restart = 1; |
774 | |
775 | break; |
776 | |
777 | default: |
778 | /* XXX unrecognized HBA error */ |
779 | xs->error = XS_DRIVER_STUFFUP; |
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
782 | restart = 1; |
783 | break; |
784 | } |
785 | |
786 | if (mpt_reply != NULL) { |
787 | if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { |
788 | memcpy(&xs->sense.scsi_sense, req->sense_vbuf, |
789 | sizeof(xs->sense.scsi_sense)); |
790 | } else if (mpt_reply->SCSIState & |
791 | MPI_SCSI_STATE_AUTOSENSE_FAILED) { |
792 | /* |
793 | * This will cause the scsipi layer to issue |
794 | * a REQUEST SENSE. |
795 | */ |
796 | if (xs->status == SCSI_CHECK) |
797 | xs->error = XS_BUSY; |
798 | } |
799 | } |
800 | |
801 | done: |
802 | if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) & |
803 | MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { |
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
805 | mpt_ctlop(mpt, mpt_reply, reply); |
806 | } |
807 | |
808 | /* If IOC done with this request, free it up. */ |
809 | if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0) |
810 | mpt_free_request(mpt, req); |
811 | |
812 | /* If address reply, give the buffer back to the IOC. */ |
813 | if (mpt_reply != NULL) |
814 | mpt_free_reply(mpt, (reply << 1)); |
815 | |
816 | if (xs != NULL) |
817 | scsipi_done(xs); |
818 | |
819 | if (restart) { |
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
821 | mpt_restart(mpt, NULL); |
822 | } |
823 | } |
824 | |
825 | static void |
826 | mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs) |
827 | { |
828 | struct scsipi_periph *periph = xs->xs_periph; |
829 | request_t *req; |
830 | MSG_SCSI_IO_REQUEST *mpt_req; |
831 | int error, s; |
832 | |
833 | s = splbio(); |
834 | req = mpt_get_request(mpt); |
835 | if (__predict_false(req == NULL)) { |
836 | /* This should happen very infrequently. */ |
837 | xs->error = XS_RESOURCE_SHORTAGE; |
838 | scsipi_done(xs); |
839 | splx(s); |
840 | return; |
841 | } |
842 | splx(s); |
843 | |
844 | /* Link the req and the scsipi_xfer. */ |
845 | req->xfer = xs; |
846 | |
847 | /* Now we build the command for the IOC */ |
848 | mpt_req = req->req_vbuf; |
849 | memset(mpt_req, 0, sizeof(*mpt_req)); |
850 | |
851 | mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; |
852 | mpt_req->Bus = mpt->bus; |
853 | |
854 | mpt_req->SenseBufferLength = |
855 | (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ? |
856 | sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE; |
857 | |
858 | /* |
859 | * We use the message context to find the request structure when |
860 | * we get the command completion interrupt from the IOC. |
861 | */ |
862 | mpt_req->MsgContext = htole32(req->index); |
863 | |
864 | /* Which physical device to do the I/O on. */ |
865 | mpt_req->TargetID = periph->periph_target; |
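	/* Place the single-level LUN in byte 1 of the 8-byte LUN field. */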
866 | mpt_req->LUN[1] = periph->periph_lun; |
867 | |
868 | /* Set the direction of the transfer. */ |
869 | if (xs->xs_control & XS_CTL_DATA_IN) |
870 | mpt_req->Control = MPI_SCSIIO_CONTROL_READ; |
871 | else if (xs->xs_control & XS_CTL_DATA_OUT) |
872 | mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; |
873 | else |
874 | mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; |
875 | |
876 | /* Set the queue behavior. */ |
877 | if (__predict_true((!mpt->is_scsi) || |
878 | (mpt->mpt_tag_enable & |
879 | (1 << periph->periph_target)))) { |
880 | switch (XS_CTL_TAGTYPE(xs)) { |
881 | case XS_CTL_HEAD_TAG: |
882 | mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; |
883 | break; |
884 | |
885 | #if 0 /* XXX */ |
886 | case XS_CTL_ACA_TAG: |
887 | mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; |
888 | break; |
889 | #endif |
890 | |
891 | case XS_CTL_ORDERED_TAG: |
892 | mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; |
893 | break; |
894 | |
895 | case XS_CTL_SIMPLE_TAG: |
896 | mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; |
897 | break; |
898 | |
899 | default: |
900 | if (mpt->is_scsi) |
901 | mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; |
902 | else |
903 | mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; |
904 | break; |
905 | } |
906 | } else |
907 | mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; |
908 | |
909 | if (__predict_false(mpt->is_scsi && |
910 | (mpt->mpt_disc_enable & |
911 | (1 << periph->periph_target)) == 0)) |
912 | mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; |
913 | |
914 | mpt_req->Control = htole32(mpt_req->Control); |
915 | |
916 | /* Copy the SCSI command block into place. */ |
917 | memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen); |
918 | |
919 | mpt_req->CDBLength = xs->cmdlen; |
920 | mpt_req->DataLength = htole32(xs->datalen); |
921 | mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); |
922 | |
923 | /* |
924 | * Map the DMA transfer. |
925 | */ |
926 | if (xs->datalen) { |
927 | SGE_SIMPLE32 *se; |
928 | |
929 | error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data, |
930 | xs->datalen, NULL, |
931 | ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT |
932 | : BUS_DMA_WAITOK) | |
933 | BUS_DMA_STREAMING | |
934 | ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ |
935 | : BUS_DMA_WRITE)); |
936 | switch (error) { |
937 | case 0: |
938 | break; |
939 | |
940 | case ENOMEM: |
941 | case EAGAIN: |
942 | xs->error = XS_RESOURCE_SHORTAGE; |
943 | goto out_bad; |
944 | |
945 | default: |
946 | xs->error = XS_DRIVER_STUFFUP; |
			mpt_prt(mpt, "error %d loading DMA map", error);
948 | out_bad: |
949 | s = splbio(); |
950 | mpt_free_request(mpt, req); |
951 | scsipi_done(xs); |
952 | splx(s); |
953 | return; |
954 | } |
955 | |
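		/*
		 * If the transfer needs more SG elements than fit in
		 * the request frame (MPT_NSGL_FIRST), put the first
		 * elements inline and chain the remainder within the
		 * request area.
		 */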
956 | if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) { |
957 | int seg, i, nleft = req->dmap->dm_nsegs; |
958 | uint32_t flags; |
959 | SGE_CHAIN32 *ce; |
960 | |
961 | seg = 0; |
962 | flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; |
963 | if (xs->xs_control & XS_CTL_DATA_OUT) |
964 | flags |= MPI_SGE_FLAGS_HOST_TO_IOC; |
965 | |
966 | se = (SGE_SIMPLE32 *) &mpt_req->SGL; |
967 | for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; |
968 | i++, se++, seg++) { |
969 | uint32_t tf; |
970 | |
971 | memset(se, 0, sizeof(*se)); |
972 | se->Address = |
973 | htole32(req->dmap->dm_segs[seg].ds_addr); |
974 | MPI_pSGE_SET_LENGTH(se, |
975 | req->dmap->dm_segs[seg].ds_len); |
976 | tf = flags; |
977 | if (i == MPT_NSGL_FIRST(mpt) - 2) |
978 | tf |= MPI_SGE_FLAGS_LAST_ELEMENT; |
979 | MPI_pSGE_SET_FLAGS(se, tf); |
980 | se->FlagsLength = htole32(se->FlagsLength); |
981 | nleft--; |
982 | } |
983 | |
984 | /* |
985 | * Tell the IOC where to find the first chain element. |
986 | */ |
987 | mpt_req->ChainOffset = |
988 | ((char *)se - (char *)mpt_req) >> 2; |
989 | |
990 | /* |
991 | * Until we're finished with all segments... |
992 | */ |
993 | while (nleft) { |
994 | int ntodo; |
995 | |
996 | /* |
997 | * Construct the chain element that points to |
998 | * the next segment. |
999 | */ |
1000 | ce = (SGE_CHAIN32 *) se++; |
1001 | if (nleft > MPT_NSGL(mpt)) { |
1002 | ntodo = MPT_NSGL(mpt) - 1; |
1003 | ce->NextChainOffset = (MPT_RQSL(mpt) - |
1004 | sizeof(SGE_SIMPLE32)) >> 2; |
1005 | ce->Length = htole16(MPT_NSGL(mpt) |
1006 | * sizeof(SGE_SIMPLE32)); |
1007 | } else { |
1008 | ntodo = nleft; |
1009 | ce->NextChainOffset = 0; |
1010 | ce->Length = htole16(ntodo |
1011 | * sizeof(SGE_SIMPLE32)); |
1012 | } |
1013 | ce->Address = htole32(req->req_pbuf + |
1014 | ((char *)se - (char *)mpt_req)); |
1015 | ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; |
1016 | for (i = 0; i < ntodo; i++, se++, seg++) { |
1017 | uint32_t tf; |
1018 | |
1019 | memset(se, 0, sizeof(*se)); |
1020 | se->Address = htole32( |
1021 | req->dmap->dm_segs[seg].ds_addr); |
1022 | MPI_pSGE_SET_LENGTH(se, |
1023 | req->dmap->dm_segs[seg].ds_len); |
1024 | tf = flags; |
1025 | if (i == ntodo - 1) { |
1026 | tf |= |
1027 | MPI_SGE_FLAGS_LAST_ELEMENT; |
1028 | if (ce->NextChainOffset == 0) { |
1029 | tf |= |
1030 | MPI_SGE_FLAGS_END_OF_LIST | |
1031 | MPI_SGE_FLAGS_END_OF_BUFFER; |
1032 | } |
1033 | } |
1034 | MPI_pSGE_SET_FLAGS(se, tf); |
1035 | se->FlagsLength = |
1036 | htole32(se->FlagsLength); |
1037 | nleft--; |
1038 | } |
1039 | } |
1040 | bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0, |
1041 | req->dmap->dm_mapsize, |
1042 | (xs->xs_control & XS_CTL_DATA_IN) ? |
1043 | BUS_DMASYNC_PREREAD |
1044 | : BUS_DMASYNC_PREWRITE); |
1045 | } else { |
1046 | int i; |
1047 | uint32_t flags; |
1048 | |
1049 | flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; |
1050 | if (xs->xs_control & XS_CTL_DATA_OUT) |
1051 | flags |= MPI_SGE_FLAGS_HOST_TO_IOC; |
1052 | |
1053 | /* Copy the segments into our SG list. */ |
1054 | se = (SGE_SIMPLE32 *) &mpt_req->SGL; |
1055 | for (i = 0; i < req->dmap->dm_nsegs; |
1056 | i++, se++) { |
1057 | uint32_t tf; |
1058 | |
1059 | memset(se, 0, sizeof(*se)); |
1060 | se->Address = |
1061 | htole32(req->dmap->dm_segs[i].ds_addr); |
1062 | MPI_pSGE_SET_LENGTH(se, |
1063 | req->dmap->dm_segs[i].ds_len); |
1064 | tf = flags; |
1065 | if (i == req->dmap->dm_nsegs - 1) { |
1066 | tf |= |
1067 | MPI_SGE_FLAGS_LAST_ELEMENT | |
1068 | MPI_SGE_FLAGS_END_OF_BUFFER | |
1069 | MPI_SGE_FLAGS_END_OF_LIST; |
1070 | } |
1071 | MPI_pSGE_SET_FLAGS(se, tf); |
1072 | se->FlagsLength = htole32(se->FlagsLength); |
1073 | } |
1074 | bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0, |
1075 | req->dmap->dm_mapsize, |
1076 | (xs->xs_control & XS_CTL_DATA_IN) ? |
1077 | BUS_DMASYNC_PREREAD |
1078 | : BUS_DMASYNC_PREWRITE); |
1079 | } |
1080 | } else { |
1081 | /* |
1082 | * No data to transfer; just make a single simple SGL |
1083 | * with zero length. |
1084 | */ |
1085 | SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL; |
1086 | memset(se, 0, sizeof(*se)); |
1087 | MPI_pSGE_SET_FLAGS(se, |
1088 | (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | |
1089 | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); |
1090 | se->FlagsLength = htole32(se->FlagsLength); |
1091 | } |
1092 | |
1093 | if (mpt->verbose > 1) |
1094 | mpt_print_scsi_io_request(mpt_req); |
1095 | |
1096 | if (xs->timeout == 0) { |
		mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x",
		    req->index);
1099 | xs->timeout = 500; |
1100 | } |
1101 | |
1102 | s = splbio(); |
1103 | if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0)) |
1104 | callout_reset(&xs->xs_callout, |
1105 | mstohz(xs->timeout), mpt_timeout, req); |
1106 | mpt_send_cmd(mpt, req); |
1107 | splx(s); |
1108 | |
1109 | if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0)) |
1110 | return; |
1111 | |
1112 | /* |
1113 | * If we can't use interrupts, poll on completion. |
1114 | */ |
1115 | if (mpt_poll(mpt, xs, xs->timeout)) |
1116 | mpt_timeout(req); |
1117 | } |
1118 | |
1119 | static void |
1120 | mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm) |
1121 | { |
1122 | fCONFIG_PAGE_SCSI_DEVICE_1 tmp; |
1123 | |
1124 | /* |
1125 | * Always allow disconnect; we don't have a way to disable |
1126 | * it right now, in any case. |
1127 | */ |
1128 | mpt->mpt_disc_enable |= (1 << xm->xm_target); |
1129 | |
1130 | if (xm->xm_mode & PERIPH_CAP_TQING) |
1131 | mpt->mpt_tag_enable |= (1 << xm->xm_target); |
1132 | else |
1133 | mpt->mpt_tag_enable &= ~(1 << xm->xm_target); |
1134 | |
1135 | if (mpt->is_scsi) { |
1136 | /* |
1137 | * SCSI transport settings only make any sense for |
1138 | * SCSI |
1139 | */ |
1140 | |
1141 | tmp = mpt->mpt_dev_page1[xm->xm_target]; |
1142 | |
1143 | /* |
1144 | * Set the wide/narrow parameter for the target. |
1145 | */ |
1146 | if (xm->xm_mode & PERIPH_CAP_WIDE16) |
1147 | tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; |
1148 | else |
1149 | tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; |
1150 | |
1151 | /* |
1152 | * Set the synchronous parameters for the target. |
1153 | * |
1154 | * XXX If we request sync transfers, we just go ahead and |
1155 | * XXX request the maximum available. We need finer control |
1156 | * XXX in order to implement Domain Validation. |
1157 | */ |
1158 | tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK | |
1159 | MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK | |
1160 | MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS | |
1161 | MPI_SCSIDEVPAGE1_RP_IU); |
1162 | if (xm->xm_mode & PERIPH_CAP_SYNC) { |
1163 | int factor, offset, np; |
1164 | |
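			/*
			 * Port Page 0 Capabilities carries the minimum
			 * sync period factor in bits 8-15 and the
			 * maximum sync offset in bits 16-23.
			 */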
1165 | factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff; |
1166 | offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff; |
1167 | np = 0; |
1168 | if (factor < 0x9) { |
1169 | /* Ultra320 */ |
1170 | np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU; |
1171 | } |
1172 | if (factor < 0xa) { |
1173 | /* at least Ultra160 */ |
1174 | np |= MPI_SCSIDEVPAGE1_RP_DT; |
1175 | } |
1176 | np |= (factor << 8) | (offset << 16); |
1177 | tmp.RequestedParameters |= np; |
1178 | } |
1179 | |
1180 | host2mpt_config_page_scsi_device_1(&tmp); |
1181 | if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) { |
			mpt_prt(mpt, "unable to write Device Page 1");
1183 | return; |
1184 | } |
1185 | |
1186 | if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) { |
			mpt_prt(mpt, "unable to read back Device Page 1");
1188 | return; |
1189 | } |
1190 | |
1191 | mpt2host_config_page_scsi_device_1(&tmp); |
1192 | mpt->mpt_dev_page1[xm->xm_target] = tmp; |
1193 | if (mpt->verbose > 1) { |
			mpt_prt(mpt,
			    "SPI Target %d Page 1: RequestedParameters %x Config %x",
1196 | xm->xm_target, |
1197 | mpt->mpt_dev_page1[xm->xm_target].RequestedParameters, |
1198 | mpt->mpt_dev_page1[xm->xm_target].Configuration); |
1199 | } |
1200 | } |
1201 | |
1202 | /* |
1203 | * Make a note that we should perform an async callback at the |
1204 | * end of the next successful command completion to report the |
1205 | * negotiated transfer mode. |
1206 | */ |
1207 | mpt->mpt_report_xfer_mode |= (1 << xm->xm_target); |
1208 | } |
1209 | |
1210 | static void |
1211 | mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph) |
1212 | { |
1213 | fCONFIG_PAGE_SCSI_DEVICE_0 tmp; |
1214 | struct scsipi_xfer_mode xm; |
1215 | int period, offset; |
1216 | |
1217 | tmp = mpt->mpt_dev_page0[periph->periph_target]; |
1218 | host2mpt_config_page_scsi_device_0(&tmp); |
1219 | if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) { |
		mpt_prt(mpt, "unable to read Device Page 0");
1221 | return; |
1222 | } |
1223 | mpt2host_config_page_scsi_device_0(&tmp); |
1224 | |
1225 | if (mpt->verbose > 1) { |
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
1228 | periph->periph_target, |
1229 | tmp.NegotiatedParameters, tmp.Information); |
1230 | } |
1231 | |
1232 | xm.xm_target = periph->periph_target; |
1233 | xm.xm_mode = 0; |
1234 | |
1235 | if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) |
1236 | xm.xm_mode |= PERIPH_CAP_WIDE16; |
1237 | |
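	/*
	 * NegotiatedParameters carries the sync period factor in bits
	 * 8-15 and the sync offset in bits 16-23; a zero offset means
	 * the target is running async.
	 */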
1238 | period = (tmp.NegotiatedParameters >> 8) & 0xff; |
1239 | offset = (tmp.NegotiatedParameters >> 16) & 0xff; |
1240 | if (offset) { |
1241 | xm.xm_period = period; |
1242 | xm.xm_offset = offset; |
1243 | xm.xm_mode |= PERIPH_CAP_SYNC; |
1244 | } |
1245 | |
1246 | /* |
1247 | * Tagged queueing is all controlled by us; there is no |
1248 | * other setting to query. |
1249 | */ |
1250 | if (mpt->mpt_tag_enable & (1 << periph->periph_target)) |
1251 | xm.xm_mode |= PERIPH_CAP_TQING; |
1252 | |
1253 | /* |
1254 | * We're going to deliver the async event, so clear the marker. |
1255 | */ |
1256 | mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target); |
1257 | |
1258 | scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm); |
1259 | } |
1260 | |
1261 | static void |
1262 | mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply) |
1263 | { |
1264 | MSG_DEFAULT_REPLY *dmsg = vmsg; |
1265 | |
1266 | switch (dmsg->Function) { |
1267 | case MPI_FUNCTION_EVENT_NOTIFICATION: |
1268 | mpt_event_notify_reply(mpt, vmsg); |
1269 | mpt_free_reply(mpt, (reply << 1)); |
1270 | break; |
1271 | |
1272 | case MPI_FUNCTION_EVENT_ACK: |
1273 | { |
1274 | MSG_EVENT_ACK_REPLY *msg = vmsg; |
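		/* Strip the high bit we set to mark driver control requests. */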
1275 | int index = le32toh(msg->MsgContext) & ~0x80000000; |
1276 | mpt_free_reply(mpt, (reply << 1)); |
1277 | if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { |
1278 | request_t *req = &mpt->request_pool[index]; |
1279 | mpt_free_request(mpt, req); |
1280 | } |
1281 | break; |
1282 | } |
1283 | |
1284 | case MPI_FUNCTION_PORT_ENABLE: |
1285 | { |
1286 | MSG_PORT_ENABLE_REPLY *msg = vmsg; |
1287 | int index = le32toh(msg->MsgContext) & ~0x80000000; |
1288 | if (mpt->verbose > 1) |
			mpt_prt(mpt, "enable port reply index %d", index);
1290 | if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { |
1291 | request_t *req = &mpt->request_pool[index]; |
1292 | req->debug = REQ_DONE; |
1293 | } |
1294 | mpt_free_reply(mpt, (reply << 1)); |
1295 | break; |
1296 | } |
1297 | |
1298 | case MPI_FUNCTION_CONFIG: |
1299 | { |
1300 | MSG_CONFIG_REPLY *msg = vmsg; |
1301 | int index = le32toh(msg->MsgContext) & ~0x80000000; |
1302 | if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { |
1303 | request_t *req = &mpt->request_pool[index]; |
1304 | req->debug = REQ_DONE; |
1305 | req->sequence = reply; |
1306 | } else |
1307 | mpt_free_reply(mpt, (reply << 1)); |
1308 | break; |
1309 | } |
1310 | |
1311 | default: |
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
1313 | } |
1314 | } |
1315 | |
1316 | static void |
1317 | mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg) |
1318 | { |
1319 | |
1320 | switch (le32toh(msg->Event)) { |
1321 | case MPI_EVENT_LOG_DATA: |
1322 | { |
1323 | int i; |
1324 | |
		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
1328 | for (i = 0; i < msg->EventDataLength; i++) { |
			if ((i % 4) == 0)
				printf("%s:\t", device_xname(mpt->sc_dev));
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
1333 | } |
1334 | if ((i % 4) != 0) |
			printf("\n");
1336 | break; |
1337 | } |
1338 | |
1339 | case MPI_EVENT_UNIT_ATTENTION: |
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1342 | break; |
1343 | |
1344 | case MPI_EVENT_IOC_BUS_RESET: |
1345 | /* We generated a bus reset. */ |
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
1348 | break; |
1349 | |
1350 | case MPI_EVENT_EXT_BUS_RESET: |
1351 | /* Someone else generated a bus reset. */ |
		mpt_prt(mpt, "External Bus Reset");
1353 | /* |
1354 | * These replies don't return EventData like the MPI |
1355 | * spec says they do. |
1356 | */ |
1357 | /* XXX Send an async event? */ |
1358 | break; |
1359 | |
1360 | case MPI_EVENT_RESCAN: |
1361 | /* |
1362 | * In general, thise means a device has been added |
1363 | * to the loop. |
1364 | */ |
1365 | mpt_prt(mpt, "Rescan Port %d" , (msg->Data[0] >> 8) & 0xff); |
1366 | /* XXX Send an async event? */ |
1367 | break; |
1368 | |
1369 | case MPI_EVENT_LINK_STATUS_CHANGE: |
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1373 | break; |
1374 | |
1375 | case MPI_EVENT_LOOP_STATE_CHANGE: |
1376 | switch ((msg->Data[0] >> 16) & 0xff) { |
1377 | case 0x01: |
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
1381 | (msg->Data[1] >> 8) & 0xff, |
1382 | (msg->Data[0] >> 8) & 0xff, |
1383 | (msg->Data[0] ) & 0xff); |
1384 | switch ((msg->Data[0] >> 8) & 0xff) { |
1385 | case 0xf7: |
1386 | if ((msg->Data[0] & 0xff) == 0xf7) |
					mpt_prt(mpt, "\tDevice needs AL_PA");
1388 | else |
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
1392 | break; |
1393 | |
1394 | case 0xf8: |
1395 | if ((msg->Data[0] & 0xff) == 0xf7) |
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
1398 | else |
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
1402 | break; |
1403 | |
1404 | default: |
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
1409 | break; |
1410 | } |
1411 | break; |
1412 | |
1413 | case 0x02: |
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
1416 | (msg->Data[1] >> 8) & 0xff, |
1417 | (msg->Data[0] >> 8) & 0xff, |
1418 | (msg->Data[0] ) & 0xff); |
1419 | break; |
1420 | |
1421 | case 0x03: |
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
1424 | (msg->Data[1] >> 8) & 0xff, |
1425 | (msg->Data[0] >> 8) & 0xff, |
1426 | (msg->Data[0] ) & 0xff); |
1427 | break; |
1428 | |
1429 | default: |
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
1432 | (msg->Data[1] >> 8) & 0xff, |
1433 | (msg->Data[0] >> 16) & 0xff, |
1434 | (msg->Data[0] >> 8) & 0xff, |
1435 | (msg->Data[0] ) & 0xff); |
1436 | break; |
1437 | } |
1438 | break; |
1439 | |
1440 | case MPI_EVENT_LOGOUT: |
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1443 | break; |
1444 | |
1445 | case MPI_EVENT_EVENT_CHANGE: |
1446 | /* |
1447 | * This is just an acknowledgement of our |
1448 | * mpt_send_event_request(). |
1449 | */ |
1450 | break; |
1451 | |
1452 | case MPI_EVENT_SAS_PHY_LINK_STATUS: |
1453 | switch ((msg->Data[0] >> 12) & 0x0f) { |
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1482 | } |
1483 | break; |
1484 | |
1485 | case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: |
1486 | case MPI_EVENT_SAS_DISCOVERY: |
1487 | /* ignore these events for now */ |
1488 | break; |
1489 | |
1490 | case MPI_EVENT_QUEUE_FULL: |
1491 | /* This can get a little chatty */ |
1492 | if (mpt->verbose > 0) |
			mpt_prt(mpt, "Queue Full Event");
1494 | break; |
1495 | |
1496 | default: |
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
1498 | break; |
1499 | } |
1500 | |
1501 | if (msg->AckRequired) { |
1502 | MSG_EVENT_ACK *ackp; |
1503 | request_t *req; |
1504 | |
1505 | if ((req = mpt_get_request(mpt)) == NULL) { |
1506 | /* XXX XXX XXX XXXJRT */ |
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
1509 | } |
1510 | |
1511 | ackp = (MSG_EVENT_ACK *) req->req_vbuf; |
1512 | memset(ackp, 0, sizeof(*ackp)); |
1513 | ackp->Function = MPI_FUNCTION_EVENT_ACK; |
1514 | ackp->Event = msg->Event; |
1515 | ackp->EventContext = msg->EventContext; |
1516 | ackp->MsgContext = htole32(req->index | 0x80000000); |
1517 | mpt_check_doorbell(mpt); |
1518 | mpt_send_cmd(mpt, req); |
1519 | } |
1520 | } |
1521 | |
1522 | static void |
1523 | mpt_bus_reset(mpt_softc_t *mpt) |
1524 | { |
1525 | request_t *req; |
1526 | MSG_SCSI_TASK_MGMT *mngt_req; |
1527 | int s; |
1528 | |
1529 | s = splbio(); |
1530 | if (mpt->mngt_req) { |
1531 | /* request already queued; can't do more */ |
1532 | splx(s); |
1533 | return; |
1534 | } |
1535 | req = mpt_get_request(mpt); |
1536 | if (__predict_false(req == NULL)) { |
		mpt_prt(mpt, "no mngt request");
1538 | splx(s); |
1539 | return; |
1540 | } |
1541 | mpt->mngt_req = req; |
1542 | splx(s); |
1543 | mngt_req = req->req_vbuf; |
1544 | memset(mngt_req, 0, sizeof(*mngt_req)); |
1545 | mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; |
1546 | mngt_req->Bus = mpt->bus; |
1547 | mngt_req->TargetID = 0; |
1548 | mngt_req->ChainOffset = 0; |
1549 | mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; |
1550 | mngt_req->Reserved1 = 0; |
1551 | mngt_req->MsgFlags = |
1552 | mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0; |
1553 | mngt_req->MsgContext = req->index; |
1554 | mngt_req->TaskMsgContext = 0; |
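	/*
	 * Task management requests go through the doorbell handshake
	 * rather than the normal request queue.
	 */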
1555 | s = splbio(); |
1556 | mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req); |
1557 | splx(s); |
1558 | } |
1559 | |
1560 | /***************************************************************************** |
1561 | * SCSI interface routines |
1562 | *****************************************************************************/ |
1563 | |
1564 | static void |
1565 | mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, |
1566 | void *arg) |
1567 | { |
1568 | struct scsipi_adapter *adapt = chan->chan_adapter; |
1569 | mpt_softc_t *mpt = device_private(adapt->adapt_dev); |
1570 | |
1571 | switch (req) { |
1572 | case ADAPTER_REQ_RUN_XFER: |
1573 | mpt_run_xfer(mpt, (struct scsipi_xfer *) arg); |
1574 | return; |
1575 | |
1576 | case ADAPTER_REQ_GROW_RESOURCES: |
1577 | /* Not supported. */ |
1578 | return; |
1579 | |
1580 | case ADAPTER_REQ_SET_XFER_MODE: |
1581 | mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg); |
1582 | return; |
1583 | } |
1584 | } |
1585 | |
1586 | static void |
1587 | mpt_minphys(struct buf *bp) |
1588 | { |
1589 | |
1590 | /* |
1591 | * Subtract one from the SGL limit, since we need an extra one to handle |
	 * a non-page-aligned transfer.
1593 | */ |
1594 | #define MPT_MAX_XFER ((MPT_SGL_MAX - 1) * PAGE_SIZE) |
1595 | |
1596 | if (bp->b_bcount > MPT_MAX_XFER) |
1597 | bp->b_bcount = MPT_MAX_XFER; |
1598 | minphys(bp); |
1599 | } |
1600 | |
1601 | static int |
1602 | mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg, |
1603 | int flag, struct proc *p) |
1604 | { |
1605 | mpt_softc_t *mpt; |
1606 | int s; |
1607 | |
1608 | mpt = device_private(chan->chan_adapter->adapt_dev); |
1609 | switch (cmd) { |
1610 | case SCBUSIORESET: |
1611 | mpt_bus_reset(mpt); |
1612 | s = splbio(); |
1613 | mpt_intr(mpt); |
1614 | splx(s); |
1615 | return(0); |
1616 | default: |
1617 | return (ENOTTY); |
1618 | } |
1619 | } |
1620 | |
1621 | #if NBIO > 0 |
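/*
 * Helpers to fetch config pages: read the page header first to learn
 * the page length (PageLength is in 32-bit words), then allocate a
 * buffer and read the full page into it.
 */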
1622 | static fCONFIG_PAGE_IOC_2 * |
1623 | mpt_get_cfg_page_ioc2(mpt_softc_t *mpt) |
1624 | { |
1625 | fCONFIG_PAGE_HEADER hdr; |
1626 | fCONFIG_PAGE_IOC_2 *ioc2; |
1627 | int rv; |
1628 | |
1629 | rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr); |
1630 | if (rv) |
1631 | return NULL; |
1632 | |
1633 | ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); |
1634 | if (ioc2 == NULL) |
1635 | return NULL; |
1636 | |
1637 | memcpy(ioc2, &hdr, sizeof(hdr)); |
1638 | |
1639 | rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header); |
1640 | if (rv) |
1641 | goto fail; |
1642 | mpt2host_config_page_ioc_2(ioc2); |
1643 | |
1644 | return ioc2; |
1645 | |
1646 | fail: |
1647 | free(ioc2, M_DEVBUF); |
1648 | return NULL; |
1649 | } |
1650 | |
1651 | static fCONFIG_PAGE_IOC_3 * |
1652 | mpt_get_cfg_page_ioc3(mpt_softc_t *mpt) |
1653 | { |
1654 | fCONFIG_PAGE_HEADER hdr; |
1655 | fCONFIG_PAGE_IOC_3 *ioc3; |
1656 | int rv; |
1657 | |
1658 | rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr); |
1659 | if (rv) |
1660 | return NULL; |
1661 | |
1662 | ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); |
1663 | if (ioc3 == NULL) |
1664 | return NULL; |
1665 | |
1666 | memcpy(ioc3, &hdr, sizeof(hdr)); |
1667 | |
1668 | rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header); |
1669 | if (rv) |
1670 | goto fail; |
1671 | |
1672 | return ioc3; |
1673 | |
1674 | fail: |
1675 | free(ioc3, M_DEVBUF); |
1676 | return NULL; |
1677 | } |
1678 | |
1679 | |
1680 | static fCONFIG_PAGE_RAID_VOL_0 * |
1681 | mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address) |
1682 | { |
1683 | fCONFIG_PAGE_HEADER hdr; |
1684 | fCONFIG_PAGE_RAID_VOL_0 *rvol0; |
1685 | int rv; |
1686 | |
1687 | rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0, |
1688 | address, &hdr); |
1689 | if (rv) |
1690 | return NULL; |
1691 | |
1692 | rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); |
1693 | if (rvol0 == NULL) |
1694 | return NULL; |
1695 | |
1696 | memcpy(rvol0, &hdr, sizeof(hdr)); |
1697 | |
1698 | rv = mpt_read_cfg_page(mpt, address, &rvol0->Header); |
1699 | if (rv) |
1700 | goto fail; |
1701 | mpt2host_config_page_raid_vol_0(rvol0); |
1702 | |
1703 | return rvol0; |
1704 | |
1705 | fail: |
1706 | free(rvol0, M_DEVBUF); |
1707 | return NULL; |
1708 | } |
1709 | |
1710 | static fCONFIG_PAGE_RAID_PHYS_DISK_0 * |
1711 | mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address) |
1712 | { |
1713 | fCONFIG_PAGE_HEADER hdr; |
1714 | fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0; |
1715 | int rv; |
1716 | |
1717 | rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, |
1718 | address, &hdr); |
1719 | if (rv) |
1720 | return NULL; |
1721 | |
1722 | physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); |
1723 | if (physdisk0 == NULL) |
1724 | return NULL; |
1725 | |
1726 | memcpy(physdisk0, &hdr, sizeof(hdr)); |
1727 | |
1728 | rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header); |
1729 | if (rv) |
1730 | goto fail; |
1731 | mpt2host_config_page_raid_phys_disk_0(physdisk0); |
1732 | |
1733 | return physdisk0; |
1734 | |
1735 | fail: |
1736 | free(physdisk0, M_DEVBUF); |
1737 | return NULL; |
1738 | } |
1739 | |
static bool
mpt_is_raid(mpt_softc_t *mpt)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	bool is_raid = false;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return false;

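	/*
	 * A CapabilitiesFlags word of 0xdeadbeef appears to mark a
	 * page the IOC never filled in; only trust the capability
	 * bits when the word looks valid.
	 */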
	if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
		is_raid = !!(ioc2->CapabilitiesFlags &
		    (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
		     MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
	}

	free(ioc2, M_DEVBUF);

	return is_raid;
}

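/*
 * mpt_bio_ioctl:
 *
 *	Dispatch bio(4) RAID management ioctls to the helpers below,
 *	serialized under the kernel lock and splbio().
 */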
static int
mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
{
	mpt_softc_t *mpt = device_private(dev);
	int error, s;

	KERNEL_LOCK(1, curlwp);
	s = splbio();

	switch (cmd) {
	case BIOCINQ:
		error = mpt_bio_ioctl_inq(mpt, addr);
		break;
	case BIOCVOL:
		error = mpt_bio_ioctl_vol(mpt, addr);
		break;
	case BIOCDISK_NOVOL:
		error = mpt_bio_ioctl_disk_novol(mpt, addr);
		break;
	case BIOCDISK:
		error = mpt_bio_ioctl_disk(mpt, addr);
		break;
	case BIOCSETSTATE:
		error = mpt_bio_ioctl_setstate(mpt, addr);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	return error;
}

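/*
 * mpt_bio_ioctl_inq:
 *
 *	BIOCINQ: report the controller name and the number of active
 *	volumes and physical disks.
 */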
static int
mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
{
	fCONFIG_PAGE_IOC_2 *ioc2;
	fCONFIG_PAGE_IOC_3 *ioc3;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
	bi->bi_novol = ioc2->NumActiveVolumes;
	bi->bi_nodisk = ioc3->NumPhysDisks;

	free(ioc2, M_DEVBUF);
	free(ioc3, M_DEVBUF);

	return 0;
}

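/*
 * mpt_bio_ioctl_vol:
 *
 *	BIOCVOL: describe one RAID volume: device node, inquiry
 *	strings, size, member count, status and RAID level.
 */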
static int
mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data inqbuf;
	char vendor[9], product[17], revision[5];
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
		goto fail;

	ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	bv->bv_dev[0] = '\0';
	bv->bv_vendor[0] = '\0';

	/*
	 * The volume is presented on the channel at target <VolumeID>,
	 * so look the periph up by target ID, not by bus number.
	 */
	periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeID, 0);
	if (periph != NULL) {
		if (periph->periph_dev != NULL) {
			snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
			    device_xname(periph->periph_dev));
		}
		memset(&inqbuf, 0, sizeof(inqbuf));
		if (scsipi_inquire(periph, &inqbuf,
		    XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
			strnvisx(vendor, sizeof(vendor),
			    inqbuf.vendor, sizeof(inqbuf.vendor),
			    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
			strnvisx(product, sizeof(product),
			    inqbuf.product, sizeof(inqbuf.product),
			    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
			strnvisx(revision, sizeof(revision),
			    inqbuf.revision, sizeof(inqbuf.revision),
			    VIS_TRIM|VIS_SAFE|VIS_OCTAL);

			snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
			    "%s %s %s", vendor, product, revision);
		}
	}
	bv->bv_nodisk = rvol0->NumPhysDisks;
	bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
	bv->bv_stripe_size = rvol0->StripeSize;
	bv->bv_percent = -1;
	bv->bv_seconds = 0;

	switch (rvol0->VolumeStatus.State) {
	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	switch (ioc2rvol->VolumeType) {
	case MPI_RAID_VOL_TYPE_IS:
		bv->bv_level = 0;
		break;
	case MPI_RAID_VOL_TYPE_IME:
	case MPI_RAID_VOL_TYPE_IM:
		bv->bv_level = 1;
		break;
	default:
		bv->bv_level = -1;
		break;
	}

	free(ioc2, M_DEVBUF);
	free(rvol0, M_DEVBUF);

	return 0;

fail:
	if (ioc2) free(ioc2, M_DEVBUF);
	if (rvol0) free(rvol0, M_DEVBUF);
	return EINVAL;
}

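/*
 * mpt_bio_ioctl_disk_common:
 *
 *	Fill in a bioc_disk structure from RAID physical disk page 0
 *	for the disk selected by `address'.
 */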
static void
mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
    int address)
{
	fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
	char vendor_id[9], product_id[17], product_rev_level[5];

	phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
	if (phys == NULL)
		return;

	strnvisx(vendor_id, sizeof(vendor_id),
	    phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	strnvisx(product_id, sizeof(product_id),
	    phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
	strnvisx(product_rev_level, sizeof(product_rev_level),
	    phys->InquiryData.ProductRevLevel,
	    sizeof(phys->InquiryData.ProductRevLevel),
	    VIS_TRIM|VIS_SAFE|VIS_OCTAL);

	snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
	    vendor_id, product_id, product_rev_level);
	strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
	bd->bd_procdev[0] = '\0';
	bd->bd_channel = phys->PhysDiskBus;
	bd->bd_target = phys->PhysDiskID;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)phys->MaxLBA * 512;

	switch (phys->PhysDiskStatus.State) {
	case MPI_PHYSDISK0_STATUS_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPI_PHYSDISK0_STATUS_MISSING:
	case MPI_PHYSDISK0_STATUS_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPI_PHYSDISK0_STATUS_INITIALIZING:
		bd->bd_status = BIOC_SDSCRUB;
		break;
	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	free(phys, M_DEVBUF);
}

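/*
 * mpt_bio_ioctl_disk_novol:
 *
 *	BIOCDISK_NOVOL: describe a physical disk by global index and
 *	determine whether it belongs to any active volume.
 */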
static int
mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address, v, d;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;
	ioc3 = mpt_get_cfg_page_ioc3(mpt);
	if (ioc3 == NULL) {
		free(ioc2, M_DEVBUF);
		return EIO;
	}

	if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
		goto fail;

	address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	bd->bd_disknovol = true;
	for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
		ioc2rvol = &ioc2->RaidVolume[v];
		address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

		rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
		if (rvol0 == NULL)
			continue;

		for (d = 0; d < rvol0->NumPhysDisks; d++) {
			if (rvol0->PhysDisk[d].PhysDiskNum ==
			    ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
				bd->bd_disknovol = false;
				bd->bd_volid = v;
				break;
			}
		}
		free(rvol0, M_DEVBUF);
	}

	free(ioc3, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

fail:
	if (ioc3) free(ioc3, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}

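/*
 * mpt_bio_ioctl_disk:
 *
 *	BIOCDISK: describe one member disk of a given volume.
 */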
static int
mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
{
	fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
	fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
	fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
	int address;

	ioc2 = mpt_get_cfg_page_ioc2(mpt);
	if (ioc2 == NULL)
		return EIO;

	if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
		goto fail;

	ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
	address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);

	rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
	if (rvol0 == NULL)
		goto fail;

	if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
		goto fail;

	address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;

	mpt_bio_ioctl_disk_common(mpt, bd, address);

	free(rvol0, M_DEVBUF);
	free(ioc2, M_DEVBUF);

	return 0;

fail:
	if (rvol0) free(rvol0, M_DEVBUF);
	if (ioc2) free(ioc2, M_DEVBUF);
	return EINVAL;
}

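/*
 * mpt_bio_ioctl_setstate:
 *
 *	BIOCSETSTATE: not implemented; disk state changes are not
 *	supported by this driver.
 */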
static int
mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
{
	return ENOTTY;
}
#endif /* NBIO > 0 */