1/* $NetBSD: mfi.c,v 1.57 2015/04/04 15:10:47 christos Exp $ */
2/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
3
4/*
5 * Copyright (c) 2012 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
30 *
31 * Permission to use, copy, modify, and distribute this software for any
32 * purpose with or without fee is hereby granted, provided that the above
33 * copyright notice and this permission notice appear in all copies.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
36 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
37 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
38 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
39 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
40 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
41 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 */
43
44 /*-
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 *
49 * Copyright 1994-2009 The FreeBSD Project.
50 * All rights reserved.
51 *
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
60 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
61 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
62 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
63 * EXEMPLARY,OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
64 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
65 * PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY THEORY
66 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
67 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
68 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
69 *
70 * The views and conclusions contained in the software and documentation
71 * are those of the authors and should not be interpreted as representing
72 * official policies,either expressed or implied, of the FreeBSD Project.
73 */
74
75#include <sys/cdefs.h>
76__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.57 2015/04/04 15:10:47 christos Exp $");
77
78#include "bio.h"
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/buf.h>
83#include <sys/ioctl.h>
84#include <sys/device.h>
85#include <sys/kernel.h>
86#include <sys/malloc.h>
87#include <sys/proc.h>
88#include <sys/cpu.h>
89#include <sys/conf.h>
90#include <sys/kauth.h>
91
92#include <uvm/uvm_param.h>
93
94#include <sys/bus.h>
95
96#include <dev/scsipi/scsipi_all.h>
97#include <dev/scsipi/scsi_all.h>
98#include <dev/scsipi/scsi_spc.h>
99#include <dev/scsipi/scsipi_disk.h>
100#include <dev/scsipi/scsi_disk.h>
101#include <dev/scsipi/scsiconf.h>
102
103#include <dev/ic/mfireg.h>
104#include <dev/ic/mfivar.h>
105#include <dev/ic/mfiio.h>
106
107#if NBIO > 0
108#include <dev/biovar.h>
109#endif /* NBIO > 0 */
110
#ifdef MFI_DEBUG
/*
 * Debug-category bitmask consumed by DNPRINTF().  Uncomment the
 * MFI_D_* entries below to enable the corresponding message classes.
 */
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
/*		    | MFI_D_IOCTL */
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
/*		    | MFI_D_SYNC */
		;
#endif
124
125static void mfi_scsipi_request(struct scsipi_channel *,
126 scsipi_adapter_req_t, void *);
127static void mfiminphys(struct buf *bp);
128
129static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *);
130static void mfi_put_ccb(struct mfi_ccb *);
131static int mfi_init_ccb(struct mfi_softc *);
132
133static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t);
134static void mfi_freemem(struct mfi_softc *, struct mfi_mem **);
135
136static int mfi_transition_firmware(struct mfi_softc *);
137static int mfi_initialize_firmware(struct mfi_softc *);
138static int mfi_get_info(struct mfi_softc *);
139static int mfi_get_bbu(struct mfi_softc *,
140 struct mfi_bbu_status *);
141/* return codes for mfi_get_bbu */
142#define MFI_BBU_GOOD 0
143#define MFI_BBU_BAD 1
144#define MFI_BBU_UNKNOWN 2
145static uint32_t mfi_read(struct mfi_softc *, bus_size_t);
146static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
147static int mfi_poll(struct mfi_ccb *);
148static int mfi_create_sgl(struct mfi_ccb *, int);
149
150/* commands */
151static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
152static int mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
153 uint64_t, uint32_t);
154static void mfi_scsi_ld_done(struct mfi_ccb *);
155static void mfi_scsi_xs_done(struct mfi_ccb *, int, int);
156static int mfi_mgmt_internal(struct mfi_softc *, uint32_t,
157 uint32_t, uint32_t, void *, uint8_t *, bool);
158static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *,
159 uint32_t, uint32_t, uint32_t, void *, uint8_t *);
160static void mfi_mgmt_done(struct mfi_ccb *);
161
162#if NBIO > 0
163static int mfi_ioctl(device_t, u_long, void *);
164static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
165static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
166static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
167static int mfi_ioctl_alarm(struct mfi_softc *,
168 struct bioc_alarm *);
169static int mfi_ioctl_blink(struct mfi_softc *sc,
170 struct bioc_blink *);
171static int mfi_ioctl_setstate(struct mfi_softc *,
172 struct bioc_setstate *);
173static int mfi_bio_hs(struct mfi_softc *, int, int, void *);
174static int mfi_create_sensors(struct mfi_softc *);
175static int mfi_destroy_sensors(struct mfi_softc *);
176static void mfi_sensor_refresh(struct sysmon_envsys *,
177 envsys_data_t *);
178#endif /* NBIO > 0 */
179static bool mfi_shutdown(device_t, int);
180static bool mfi_suspend(device_t, const pmf_qual_t *);
181static bool mfi_resume(device_t, const pmf_qual_t *);
182
static dev_type_open(mfifopen);
static dev_type_close(mfifclose);
static dev_type_ioctl(mfifioctl);
/*
 * Character-device switch for the management node (/dev/mfi*); only
 * open/close/ioctl are implemented, all other operations are stubs.
 */
const struct cdevsw mfi_cdevsw = {
	.d_open = mfifopen,
	.d_close = mfifclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mfifioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
200
201extern struct cfdriver mfi_cd;
202
203static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc);
204static void mfi_xscale_intr_ena(struct mfi_softc *sc);
205static void mfi_xscale_intr_dis(struct mfi_softc *sc);
206static int mfi_xscale_intr(struct mfi_softc *sc);
207static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
208
/*
 * Register-access operations for XScale-based controllers,
 * dispatched through the mfi_fw_state()/mfi_intr_*()/mfi_post()
 * macros below.
 */
static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_dis,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_scsi_ld_io,
};
217
218static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc);
219static void mfi_ppc_intr_ena(struct mfi_softc *sc);
220static void mfi_ppc_intr_dis(struct mfi_softc *sc);
221static int mfi_ppc_intr(struct mfi_softc *sc);
222static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
223
/* Register-access operations for PPC-based controllers. */
static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_dis,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_scsi_ld_io,
};
232
233uint32_t mfi_gen2_fw_state(struct mfi_softc *sc);
234void mfi_gen2_intr_ena(struct mfi_softc *sc);
235void mfi_gen2_intr_dis(struct mfi_softc *sc);
236int mfi_gen2_intr(struct mfi_softc *sc);
237void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
238
/* Register-access operations for gen2 controllers. */
static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_dis,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_scsi_ld_io,
};
247
248u_int32_t mfi_skinny_fw_state(struct mfi_softc *);
249void mfi_skinny_intr_dis(struct mfi_softc *);
250void mfi_skinny_intr_ena(struct mfi_softc *);
251int mfi_skinny_intr(struct mfi_softc *);
252void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
253
/* Register-access operations for "skinny" controllers. */
static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_dis,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_scsi_ld_io,
};
262
263static int mfi_tbolt_init_desc_pool(struct mfi_softc *);
264static int mfi_tbolt_init_MFI_queue(struct mfi_softc *);
265static void mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
266int mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
267 uint64_t, uint32_t);
268static void mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
269static int mfi_tbolt_create_sgl(struct mfi_ccb *, int);
270void mfi_tbolt_sync_map_info(struct work *, void *);
271static void mfi_sync_map_complete(struct mfi_ccb *);
272
273u_int32_t mfi_tbolt_fw_state(struct mfi_softc *);
274void mfi_tbolt_intr_dis(struct mfi_softc *);
275void mfi_tbolt_intr_ena(struct mfi_softc *);
276int mfi_tbolt_intr(struct mfi_softc *sc);
277void mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);
278
/*
 * Register-access operations for Thunderbolt controllers; note these
 * use the MPT-style I/O path (mfi_tbolt_scsi_ld_io) rather than the
 * legacy mfi_scsi_ld_io used by the other IOP types.
 */
static const struct mfi_iop_ops mfi_iop_tbolt = {
	mfi_tbolt_fw_state,
	mfi_tbolt_intr_dis,
	mfi_tbolt_intr_ena,
	mfi_tbolt_intr,
	mfi_tbolt_post,
	mfi_tbolt_scsi_ld_io,
};
287
/* Convenience wrappers dispatching through the per-IOP operations table. */
#define mfi_fw_state(_s) 	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s) 	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_intr_disable(_s) 	((_s)->sc_iop->mio_intr_dis(_s))
#define mfi_my_intr(_s) 	((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c) 	((_s)->sc_iop->mio_post((_s), (_c)))
293
294static struct mfi_ccb *
295mfi_get_ccb(struct mfi_softc *sc)
296{
297 struct mfi_ccb *ccb;
298 int s;
299
300 s = splbio();
301 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
302 if (ccb) {
303 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
304 ccb->ccb_state = MFI_CCB_READY;
305 }
306 splx(s);
307
308 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
309 if (__predict_false(ccb == NULL && sc->sc_running))
310 aprint_error_dev(sc->sc_dev, "out of ccb\n");
311
312 return ccb;
313}
314
315static void
316mfi_put_ccb(struct mfi_ccb *ccb)
317{
318 struct mfi_softc *sc = ccb->ccb_sc;
319 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
320 int s;
321
322 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
323
324 hdr->mfh_cmd_status = 0x0;
325 hdr->mfh_flags = 0x0;
326 ccb->ccb_state = MFI_CCB_FREE;
327 ccb->ccb_xs = NULL;
328 ccb->ccb_flags = 0;
329 ccb->ccb_done = NULL;
330 ccb->ccb_direction = 0;
331 ccb->ccb_frame_size = 0;
332 ccb->ccb_extra_frames = 0;
333 ccb->ccb_sgl = NULL;
334 ccb->ccb_data = NULL;
335 ccb->ccb_len = 0;
336 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
337 /* erase tb_request_desc but preserve SMID */
338 int index = ccb->ccb_tb_request_desc.header.SMID;
339 ccb->ccb_tb_request_desc.words = 0;
340 ccb->ccb_tb_request_desc.header.SMID = index;
341 }
342 s = splbio();
343 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
344 splx(s);
345}
346
/*
 * Drain the free list, destroying each ccb's DMA map, then release the
 * ccb array.  Returns EBUSY if fewer than sc_max_cmds ccbs could be
 * reclaimed (i.e. some commands are still outstanding).
 */
static int
mfi_destroy_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	uint32_t		i;

	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));


	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
		/* destroy the dma map used for transfers */
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (i < sc->sc_max_cmds)
		return EBUSY;

	free(sc->sc_ccb, M_DEVBUF);

	return 0;
}
368
/*
 * Allocate and initialize the array of command control blocks.  Each
 * ccb is wired to its slice of the pre-allocated frame and sense DMA
 * regions (and, on Thunderbolt, to its I/O request and SG-frame
 * slices), given a data DMA map, and placed on the free list.
 * Returns 0 on success, 1 on failure (with all partial state undone).
 */
static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	uint32_t		i;
	int			error;
	bus_addr_t		io_req_base_phys;
	uint8_t			*io_req_base;
	int	offset;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		/*
		 * The first 256 bytes (SMID 0) is not used.
		 * Don't add to the cmd list.
		 */
		io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
		io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) +
		    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	} else {
		io_req_base = NULL;	/* XXX: gcc */
		io_req_base_phys = 0;	/* XXX: gcc */
	}

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		/* the context field echoes back which ccb completed */
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_datadmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "cannot create ccb dmamap (%d)\n", error);
			goto destroy;
		}
		if (sc->sc_ioptype == MFI_IOP_TBOLT) {
			/* carve out this ccb's MPT I/O request slice */
			offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
			ccb->ccb_tb_io_request =
			    (struct mfi_mpi2_request_raid_scsi_io *)
			    (io_req_base + offset);
			ccb->ccb_tb_pio_request =
			    io_req_base_phys + offset;
			/* and its SG chain-frame slice past the reply pool */
			offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
			ccb->ccb_tb_sg_frame =
			    (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
			    offset);
			ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
			    offset;
			/* SMID 0 is reserved. Set SMID/index from 1 */
			ccb->ccb_tb_request_desc.header.SMID = i + 1;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}
465
466static uint32_t
467mfi_read(struct mfi_softc *sc, bus_size_t r)
468{
469 uint32_t rv;
470
471 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
472 BUS_SPACE_BARRIER_READ);
473 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
474
475 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), (u_long)r, rv);
476 return rv;
477}
478
479static void
480mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
481{
482 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);
483
484 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
485 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
486 BUS_SPACE_BARRIER_WRITE);
487}
488
/*
 * Allocate a physically contiguous, zeroed DMA-safe memory region of
 * the given size and return its bookkeeping structure, or NULL on
 * failure.  The region is mapped into KVA and loaded into a DMA map;
 * mfi_freemem() undoes all of this.  Cleanup on partial failure runs
 * through the goto labels in reverse order of acquisition.
 */
static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	/* single segment: the region must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}
537
538static void
539mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
540{
541 struct mfi_mem *mm = *mmp;
542
543 if (mm == NULL)
544 return;
545
546 *mmp = NULL;
547
548 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
549
550 bus_dmamap_unload(sc->sc_dmat, mm->am_map);
551 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
552 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
553 bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
554 free(mm, M_DEVBUF);
555}
556
/*
 * Step the controller firmware toward MFI_STATE_READY, issuing the
 * doorbell write each intermediate state requires and then polling for
 * the state to change.  max_wait is in seconds (the poll loop below
 * runs max_wait * 10 iterations of a 100ms DELAY).  Returns 0 once
 * READY, 1 on fault, unknown state, or timeout.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t		fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			aprint_error_dev(sc->sc_dev, "firmware fault\n");
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* skinny/tbolt use a different doorbell register */
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT)
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			else
				mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if (sc->sc_ioptype == MFI_IOP_SKINNY ||
			    sc->sc_ioptype == MFI_IOP_TBOLT) {
				mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
			} else {
				mfi_write(sc, MFI_IDB, MFI_INIT_HOTPLUG);
			}
			max_wait = 180;
			break;
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown firmware state %d\n", fw_state);
			return 1;
		}
		/* poll for up to max_wait seconds, 100ms at a time */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			aprint_error_dev(sc->sc_dev,
			    "firmware stuck in state %#x\n", fw_state);
			return 1;
		}
	}

	return 0;
}
632
633static int
634mfi_initialize_firmware(struct mfi_softc *sc)
635{
636 struct mfi_ccb *ccb;
637 struct mfi_init_frame *init;
638 struct mfi_init_qinfo *qinfo;
639
640 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
641
642 if ((ccb = mfi_get_ccb(sc)) == NULL)
643 return 1;
644
645 init = &ccb->ccb_frame->mfr_init;
646 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
647
648 memset(qinfo, 0, sizeof *qinfo);
649 qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
650 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
651 offsetof(struct mfi_prod_cons, mpc_reply_q));
652 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
653 offsetof(struct mfi_prod_cons, mpc_producer));
654 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
655 offsetof(struct mfi_prod_cons, mpc_consumer));
656
657 init->mif_header.mfh_cmd = MFI_CMD_INIT;
658 init->mif_header.mfh_data_len = sizeof *qinfo;
659 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
660
661 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
662 DEVNAME(sc),
663 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
664 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
665
666 if (mfi_poll(ccb)) {
667 aprint_error_dev(sc->sc_dev,
668 "mfi_initialize_firmware failed\n");
669 return 1;
670 }
671
672 mfi_put_ccb(ccb);
673
674 return 0;
675}
676
/*
 * Fetch the controller-info page (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info.  Returns 0 on success, 1 on failure.  Under MFI_DEBUG
 * the remainder of the function dumps the page's contents.
 */
static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	/* poll synchronously while cold (autoconf), sleep otherwise */
	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
		return 1;

#ifdef MFI_DEBUG
	/* firmware image inventory */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	/* capabilities and limits */
	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	/* adapter properties */
	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	/* PCI identity and port layout */
	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++) {
		printf("%.0" PRIx64 " ",
		    sc->sc_info.mci_device.mid_port_addr[i]);
	}
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}
837
838static int
839mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
840{
841 DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));
842
843 if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
844 sizeof(*stat), stat, NULL, cold ? true : false))
845 return MFI_BBU_UNKNOWN;
846#ifdef MFI_DEBUG
847 printf("bbu type %d, voltage %d, current %d, temperature %d, "
848 "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
849 stat->temperature, stat->fw_status);
850 printf("details: ");
851 switch(stat->battery_type) {
852 case MFI_BBU_TYPE_IBBU:
853 printf("guage %d relative charge %d charger state %d "
854 "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
855 stat->detail.ibbu.relative_charge ,
856 stat->detail.ibbu.charger_system_state ,
857 stat->detail.ibbu.charger_system_ctrl);
858 printf("\tcurrent %d abs charge %d max error %d\n",
859 stat->detail.ibbu.charging_current ,
860 stat->detail.ibbu.absolute_charge ,
861 stat->detail.ibbu.max_error);
862 break;
863 case MFI_BBU_TYPE_BBU:
864 printf("guage %d relative charge %d charger state %d\n",
865 stat->detail.ibbu.gas_guage_status,
866 stat->detail.bbu.relative_charge ,
867 stat->detail.bbu.charger_status );
868 printf("\trem capacity %d fyll capacity %d SOH %d\n",
869 stat->detail.bbu.remaining_capacity ,
870 stat->detail.bbu.full_charge_capacity ,
871 stat->detail.bbu.is_SOH_good);
872 default:
873 printf("\n");
874 }
875#endif
876 switch(stat->battery_type) {
877 case MFI_BBU_TYPE_BBU:
878 return (stat->detail.bbu.is_SOH_good ?
879 MFI_BBU_GOOD : MFI_BBU_BAD);
880 case MFI_BBU_TYPE_NONE:
881 return MFI_BBU_UNKNOWN;
882 default:
883 if (stat->fw_status &
884 (MFI_BBU_STATE_PACK_MISSING |
885 MFI_BBU_STATE_VOLTAGE_LOW |
886 MFI_BBU_STATE_TEMPERATURE_HIGH |
887 MFI_BBU_STATE_LEARN_CYC_FAIL |
888 MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
889 MFI_BBU_STATE_I2C_ERR_DETECT))
890 return MFI_BBU_BAD;
891 return MFI_BBU_GOOD;
892 }
893}
894
895static void
896mfiminphys(struct buf *bp)
897{
898 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
899
900 /* XXX currently using MFI_MAXFER = MAXPHYS */
901 if (bp->b_bcount > MFI_MAXFER)
902 bp->b_bcount = MFI_MAXFER;
903 minphys(bp);
904}
905
906int
907mfi_rescan(device_t self, const char *ifattr, const int *locators)
908{
909 struct mfi_softc *sc = device_private(self);
910
911 if (sc->sc_child != NULL)
912 return 0;
913
914 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
915 scsiprint, NULL);
916
917 return 0;
918}
919
920void
921mfi_childdetached(device_t self, device_t child)
922{
923 struct mfi_softc *sc = device_private(self);
924
925 KASSERT(self == sc->sc_dev);
926 KASSERT(child == sc->sc_child);
927
928 if (child == sc->sc_child)
929 sc->sc_child = NULL;
930}
931
/*
 * Detach the controller: detach children, unregister bio/envsys,
 * quiesce the hardware, then release Thunderbolt pools, ccbs, and the
 * shared DMA regions in reverse order of attachment.  Returns 0 on
 * success or an error if children or ccbs cannot be released.
 */
int
mfi_detach(struct mfi_softc *sc, int flags)
{
	int error;

	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mfi_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	/* stop interrupts and flush/shut down the firmware */
	mfi_intr_disable(sc);
	mfi_shutdown(sc->sc_dev, 0);

	if (sc->sc_ioptype == MFI_IOP_TBOLT) {
		workqueue_destroy(sc->sc_ldsync_wq);
		mfi_put_ccb(sc->sc_ldsync_ccb);
		mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
		mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
		mfi_freemem(sc, &sc->sc_tbolt_verbuf);
	}

	if ((error = mfi_destroy_ccb(sc)) != 0)
		return error;

	mfi_freemem(sc, &sc->sc_sense);

	mfi_freemem(sc, &sc->sc_frames);

	mfi_freemem(sc, &sc->sc_pcq);

	return 0;
}
969
/*
 * PMF shutdown hook: flush the controller and disk caches and tell the
 * firmware to shut down, then mark the controller as no longer
 * running.  Runs at splbio.  Returns true on success, false if either
 * DCMD fails.
 */
static bool
mfi_shutdown(device_t dev, int how)
{
	struct mfi_softc	*sc = device_private(dev);
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s = splbio();
	DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
	if (sc->sc_running) {
		/* flush both controller and disk write caches */
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: cache flush failed\n");
			goto fail;
		}

		mbox[0] = 0;
		if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
		    MFI_DATA_NONE, 0, NULL, mbox, true)) {
			aprint_error_dev(dev, "shutdown: "
			    "firmware shutdown failed\n");
			goto fail;
		}
		sc->sc_running = false;
	}
	splx(s);
	return true;
fail:
	splx(s);
	return false;
}
1000
/* PMF suspend hook: not implemented, so always report failure. */
static bool
mfi_suspend(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1007
/* PMF resume hook: not implemented, so always report failure. */
static bool
mfi_resume(device_t dev, const pmf_qual_t *q)
{
	/* XXX to be implemented */
	return false;
}
1014
1015int
1016mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
1017{
1018 struct scsipi_adapter *adapt = &sc->sc_adapt;
1019 struct scsipi_channel *chan = &sc->sc_chan;
1020 uint32_t status, frames, max_sgl;
1021 int i;
1022
1023 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
1024
1025 sc->sc_ioptype = iop;
1026
1027 switch (iop) {
1028 case MFI_IOP_XSCALE:
1029 sc->sc_iop = &mfi_iop_xscale;
1030 break;
1031 case MFI_IOP_PPC:
1032 sc->sc_iop = &mfi_iop_ppc;
1033 break;
1034 case MFI_IOP_GEN2:
1035 sc->sc_iop = &mfi_iop_gen2;
1036 break;
1037 case MFI_IOP_SKINNY:
1038 sc->sc_iop = &mfi_iop_skinny;
1039 break;
1040 case MFI_IOP_TBOLT:
1041 sc->sc_iop = &mfi_iop_tbolt;
1042 break;
1043 default:
1044 panic("%s: unknown iop %d", DEVNAME(sc), iop);
1045 }
1046
1047 if (mfi_transition_firmware(sc))
1048 return 1;
1049
1050 TAILQ_INIT(&sc->sc_ccb_freeq);
1051
1052 status = mfi_fw_state(sc);
1053 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
1054 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
1055 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1056 sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
1057 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
1058 } else if (sc->sc_64bit_dma) {
1059 sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
1060 sc->sc_sgl_size = sizeof(struct mfi_sg64);
1061 } else {
1062 sc->sc_max_sgl = max_sgl;
1063 sc->sc_sgl_size = sizeof(struct mfi_sg32);
1064 }
1065 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
1066 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
1067
1068 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1069 uint32_t tb_mem_size;
1070 /* for Alignment */
1071 tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;
1072
1073 tb_mem_size +=
1074 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
1075 sc->sc_reply_pool_size =
1076 ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
1077 tb_mem_size +=
1078 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
1079
1080 /* this is for SGL's */
1081 tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
1082 sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
1083 if (sc->sc_tbolt_reqmsgpool == NULL) {
1084 aprint_error_dev(sc->sc_dev,
1085 "unable to allocate thunderbolt "
1086 "request message pool\n");
1087 goto nopcq;
1088 }
1089 if (mfi_tbolt_init_desc_pool(sc)) {
1090 aprint_error_dev(sc->sc_dev,
1091 "Thunderbolt pool preparation error\n");
1092 goto nopcq;
1093 }
1094
1095 /*
1096 * Allocate DMA memory mapping for MPI2 IOC Init descriptor,
1097 * we are taking it diffrent from what we have allocated for
1098 * Request and reply descriptors to avoid confusion later
1099 */
1100 sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
1101 sizeof(struct mpi2_ioc_init_request));
1102 if (sc->sc_tbolt_ioc_init == NULL) {
1103 aprint_error_dev(sc->sc_dev,
1104 "unable to allocate thunderbolt IOC init memory");
1105 goto nopcq;
1106 }
1107
1108 sc->sc_tbolt_verbuf = mfi_allocmem(sc,
1109 MEGASAS_MAX_NAME*sizeof(bus_addr_t));
1110 if (sc->sc_tbolt_verbuf == NULL) {
1111 aprint_error_dev(sc->sc_dev,
1112 "unable to allocate thunderbolt version buffer\n");
1113 goto nopcq;
1114 }
1115
1116 }
1117 /* consumer/producer and reply queue memory */
1118 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
1119 sizeof(struct mfi_prod_cons));
1120 if (sc->sc_pcq == NULL) {
1121 aprint_error_dev(sc->sc_dev,
1122 "unable to allocate reply queue memory\n");
1123 goto nopcq;
1124 }
1125 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
1126 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
1127 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1128
1129 /* frame memory */
1130 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
1131 MFI_FRAME_SIZE + 1;
1132 sc->sc_frames_size = frames * MFI_FRAME_SIZE;
1133 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
1134 if (sc->sc_frames == NULL) {
1135 aprint_error_dev(sc->sc_dev,
1136 "unable to allocate frame memory\n");
1137 goto noframe;
1138 }
1139 /* XXX hack, fix this */
1140 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
1141 aprint_error_dev(sc->sc_dev,
1142 "improper frame alignment (%#llx) FIXME\n",
1143 (long long int)MFIMEM_DVA(sc->sc_frames));
1144 goto noframe;
1145 }
1146
1147 /* sense memory */
1148 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
1149 if (sc->sc_sense == NULL) {
1150 aprint_error_dev(sc->sc_dev,
1151 "unable to allocate sense memory\n");
1152 goto nosense;
1153 }
1154
1155 /* now that we have all memory bits go initialize ccbs */
1156 if (mfi_init_ccb(sc)) {
1157 aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
1158 goto noinit;
1159 }
1160
1161 /* kickstart firmware with all addresses and pointers */
1162 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1163 if (mfi_tbolt_init_MFI_queue(sc)) {
1164 aprint_error_dev(sc->sc_dev,
1165 "could not initialize firmware\n");
1166 goto noinit;
1167 }
1168 } else {
1169 if (mfi_initialize_firmware(sc)) {
1170 aprint_error_dev(sc->sc_dev,
1171 "could not initialize firmware\n");
1172 goto noinit;
1173 }
1174 }
1175 sc->sc_running = true;
1176
1177 if (mfi_get_info(sc)) {
1178 aprint_error_dev(sc->sc_dev,
1179 "could not retrieve controller information\n");
1180 goto noinit;
1181 }
1182 aprint_normal_dev(sc->sc_dev,
1183 "%s version %s\n",
1184 sc->sc_info.mci_product_name,
1185 sc->sc_info.mci_package_version);
1186
1187
1188 aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
1189 sc->sc_info.mci_lds_present,
1190 sc->sc_info.mci_memory_size);
1191 sc->sc_bbuok = false;
1192 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
1193 struct mfi_bbu_status bbu_stat;
1194 int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
1195 aprint_normal("BBU type ");
1196 switch (bbu_stat.battery_type) {
1197 case MFI_BBU_TYPE_BBU:
1198 aprint_normal("BBU");
1199 break;
1200 case MFI_BBU_TYPE_IBBU:
1201 aprint_normal("IBBU");
1202 break;
1203 default:
1204 aprint_normal("unknown type %d", bbu_stat.battery_type);
1205 }
1206 aprint_normal(", status ");
1207 switch(mfi_bbu_status) {
1208 case MFI_BBU_GOOD:
1209 aprint_normal("good\n");
1210 sc->sc_bbuok = true;
1211 break;
1212 case MFI_BBU_BAD:
1213 aprint_normal("bad\n");
1214 break;
1215 case MFI_BBU_UNKNOWN:
1216 aprint_normal("unknown\n");
1217 break;
1218 default:
1219 panic("mfi_bbu_status");
1220 }
1221 } else {
1222 aprint_normal("BBU not present\n");
1223 }
1224
1225 sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
1226 sc->sc_max_ld = sc->sc_ld_cnt;
1227 for (i = 0; i < sc->sc_ld_cnt; i++)
1228 sc->sc_ld[i].ld_present = 1;
1229
1230 memset(adapt, 0, sizeof(*adapt));
1231 adapt->adapt_dev = sc->sc_dev;
1232 adapt->adapt_nchannels = 1;
1233 /* keep a few commands for management */
1234 if (sc->sc_max_cmds > 4)
1235 adapt->adapt_openings = sc->sc_max_cmds - 4;
1236 else
1237 adapt->adapt_openings = sc->sc_max_cmds;
1238 adapt->adapt_max_periph = adapt->adapt_openings;
1239 adapt->adapt_request = mfi_scsipi_request;
1240 adapt->adapt_minphys = mfiminphys;
1241
1242 memset(chan, 0, sizeof(*chan));
1243 chan->chan_adapter = adapt;
1244 chan->chan_bustype = &scsi_sas_bustype;
1245 chan->chan_channel = 0;
1246 chan->chan_flags = 0;
1247 chan->chan_nluns = 8;
1248 chan->chan_ntargets = MFI_MAX_LD;
1249 chan->chan_id = MFI_MAX_LD;
1250
1251 mfi_rescan(sc->sc_dev, "scsi", NULL);
1252
1253 /* enable interrupts */
1254 mfi_intr_enable(sc);
1255
1256#if NBIO > 0
1257 if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
1258 panic("%s: controller registration failed", DEVNAME(sc));
1259 if (mfi_create_sensors(sc) != 0)
1260 aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
1261#endif /* NBIO > 0 */
1262 if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
1263 mfi_shutdown)) {
1264 aprint_error_dev(sc->sc_dev,
1265 "couldn't establish power handler\n");
1266 }
1267
1268 return 0;
1269noinit:
1270 mfi_freemem(sc, &sc->sc_sense);
1271nosense:
1272 mfi_freemem(sc, &sc->sc_frames);
1273noframe:
1274 mfi_freemem(sc, &sc->sc_pcq);
1275nopcq:
1276 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
1277 if (sc->sc_tbolt_reqmsgpool)
1278 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
1279 if (sc->sc_tbolt_verbuf)
1280 mfi_freemem(sc, &sc->sc_tbolt_verbuf);
1281 }
1282 return 1;
1283}
1284
/*
 * Synchronously execute the command in 'ccb', busy-waiting up to
 * 5 seconds for completion.  Used for polled (XS_CTL_POLL) and
 * internal management commands.  Returns 0 on success, 1 on timeout
 * (MFI_CCB_F_ERR is set in the ccb in that case).  The caller is
 * responsible for releasing the ccb.
 */
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;
	int rv = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	/* 0xff marks the status as "not yet written by the firmware". */
	hdr->mfh_cmd_status = 0xff;
	if (!sc->sc_MFA_enabled)
		hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* no callback, caller is supposed to do the cleanup */
	ccb->ccb_done = NULL;

	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * depending on the command type, result may be posted
		 * to *hdr, or not. In addition it seems there's
		 * no way to avoid posting the SMID to the reply queue.
		 * So poll by calling the interrupt routine until the
		 * ccb is marked done.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			mfi_tbolt_intrh(sc);
		}
	} else {
		/* legacy path: firmware writes the status into the frame */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			/* re-sync so we observe the firmware's update */
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* tear down the data mapping, if any, now that the command is over */
	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}
1359
/*
 * Interrupt handler for the legacy (non-Thunderbolt) queue model.
 * Walks the shared producer/consumer reply ring, completing every ccb
 * whose context the firmware posted, then writes back the new consumer
 * index.  Returns non-zero iff at least one command was completed (so
 * the interrupt is claimed).
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_prod_cons *pcq;
	struct mfi_ccb *ccb;
	uint32_t producer, consumer, ctx;
	int claimed = 0;

	/* Cheap register check first: is this interrupt really ours? */
	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	/* Pull in the firmware's updates to the reply ring. */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		/* Consume the entry, marking the slot free for reuse. */
		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		/* Ring has sc_max_cmds + 1 slots; wrap at the end. */
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* Publish the new consumer index back to the firmware. */
	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}
1419
1420static int
1421mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
1422 uint32_t blockcnt)
1423{
1424 struct scsipi_periph *periph = xs->xs_periph;
1425 struct mfi_io_frame *io;
1426
1427 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
1428 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1429 periph->periph_target);
1430
1431 if (!xs->data)
1432 return 1;
1433
1434 io = &ccb->ccb_frame->mfr_io;
1435 if (xs->xs_control & XS_CTL_DATA_IN) {
1436 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1437 ccb->ccb_direction = MFI_DATA_IN;
1438 } else {
1439 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1440 ccb->ccb_direction = MFI_DATA_OUT;
1441 }
1442 io->mif_header.mfh_target_id = periph->periph_target;
1443 io->mif_header.mfh_timeout = 0;
1444 io->mif_header.mfh_flags = 0;
1445 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1446 io->mif_header.mfh_data_len= blockcnt;
1447 io->mif_lba_hi = (blockno >> 32);
1448 io->mif_lba_lo = (blockno & 0xffffffff);
1449 io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
1450 io->mif_sense_addr_hi = 0;
1451
1452 ccb->ccb_done = mfi_scsi_ld_done;
1453 ccb->ccb_xs = xs;
1454 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1455 ccb->ccb_sgl = &io->mif_sgl;
1456 ccb->ccb_data = xs->data;
1457 ccb->ccb_len = xs->datalen;
1458
1459 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1460 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1461 return 1;
1462
1463 return 0;
1464}
1465
1466static void
1467mfi_scsi_ld_done(struct mfi_ccb *ccb)
1468{
1469 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
1470 mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
1471}
1472
/*
 * Common completion path for SCSI xfers: tear down the data DMA
 * mapping, translate the firmware status (copying sense data if the
 * target reported a SCSI error), release the ccb and finish the xfer.
 */
static void
mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), status);

		/* non-zero SCSI status: the firmware captured sense data */
		if (scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}
1520
1521static int
1522mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
1523{
1524 struct mfi_pass_frame *pf;
1525 struct scsipi_periph *periph = xs->xs_periph;
1526
1527 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1528 device_xname(periph->periph_channel->chan_adapter->adapt_dev),
1529 periph->periph_target);
1530
1531 pf = &ccb->ccb_frame->mfr_pass;
1532 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1533 pf->mpf_header.mfh_target_id = periph->periph_target;
1534 pf->mpf_header.mfh_lun_id = 0;
1535 pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1536 pf->mpf_header.mfh_timeout = 0;
1537 pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */
1538 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1539
1540 pf->mpf_sense_addr_hi = 0;
1541 pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
1542
1543 memset(pf->mpf_cdb, 0, 16);
1544 memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
1545
1546 ccb->ccb_done = mfi_scsi_ld_done;
1547 ccb->ccb_xs = xs;
1548 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1549 ccb->ccb_sgl = &pf->mpf_sgl;
1550
1551 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1552 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
1553 MFI_DATA_IN : MFI_DATA_OUT;
1554 else
1555 ccb->ccb_direction = MFI_DATA_NONE;
1556
1557 if (xs->data) {
1558 ccb->ccb_data = xs->data;
1559 ccb->ccb_len = xs->datalen;
1560
1561 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
1562 BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1563 return 1;
1564 }
1565
1566 return 0;
1567}
1568
/*
 * scsipi adapter entry point.  Dispatches adapter requests; for
 * ADAPTER_REQ_RUN_XFER it validates the target, builds the proper
 * MFI frame (fast I/O path for READ/WRITE, DCMD for cache sync,
 * passthrough for everything else) and either polls for completion
 * (XS_CTL_POLL) or queues the frame to the firmware.
 */
static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfi_softc *sc = device_private(adapt->adapt_dev);
	struct mfi_ccb *ccb;
	struct scsi_rw_6 *rw;
	struct scsipi_rw_10 *rwb;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t blockno;
	uint32_t blockcnt;
	uint8_t target;
	uint8_t mbox[MFI_MBOX_SIZE];
	int s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		/* Only tagged queueing is negotiable; report it back. */
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	/* Only LUN 0 of present logical drives is addressable. */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		/* 6-byte CDB: 21-bit LBA; length 0 means 256 blocks */
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
	case SCSI_SYNCHRONIZE_CACHE_16:
		/* translate to a firmware cache-flush DCMD */
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			goto stuffup;
		}
		break;

	/* hand it of to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			aprint_error_dev(sc->sc_dev,
			    "mfi_scsipi_request poll failed\n");
			/* fabricate ILLEGAL REQUEST sense for the timeout */
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	/* async path: completion comes back through mfi_intr() */
	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	mfi_put_ccb(ccb);
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}
1751
1752static int
1753mfi_create_sgl(struct mfi_ccb *ccb, int flags)
1754{
1755 struct mfi_softc *sc = ccb->ccb_sc;
1756 struct mfi_frame_header *hdr;
1757 bus_dma_segment_t *sgd;
1758 union mfi_sgl *sgl;
1759 int error, i;
1760
1761 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
1762 (u_long)ccb->ccb_data);
1763
1764 if (!ccb->ccb_data)
1765 return 1;
1766
1767 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
1768 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
1769 ccb->ccb_data, ccb->ccb_len, NULL, flags);
1770 if (error) {
1771 if (error == EFBIG) {
1772 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
1773 sc->sc_max_sgl);
1774 } else {
1775 aprint_error_dev(sc->sc_dev,
1776 "error %d loading dma map\n", error);
1777 }
1778 return 1;
1779 }
1780
1781 hdr = &ccb->ccb_frame->mfr_header;
1782 sgl = ccb->ccb_sgl;
1783 sgd = ccb->ccb_dmamap->dm_segs;
1784 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1785 if (sc->sc_ioptype == MFI_IOP_TBOLT &&
1786 (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
1787 hdr->mfh_cmd == MFI_CMD_LD_READ ||
1788 hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
1789 sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
1790 sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
1791 sgl->sg_ieee[i].flags = 0;
1792 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1793 PRIx32 "\n",
1794 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1795 hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
1796 } else if (sc->sc_64bit_dma) {
1797 sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1798 sgl->sg64[i].len = htole32(sgd[i].ds_len);
1799 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
1800 PRIx32 "\n",
1801 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1802 hdr->mfh_flags |= MFI_FRAME_SGL64;
1803 } else {
1804 sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1805 sgl->sg32[i].len = htole32(sgd[i].ds_len);
1806 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
1807 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1808 hdr->mfh_flags |= MFI_FRAME_SGL32;
1809 }
1810 }
1811
1812 if (ccb->ccb_direction == MFI_DATA_IN) {
1813 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1814 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1815 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1816 } else {
1817 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1818 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
1819 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1820 }
1821
1822 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1823 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
1824 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1825
1826 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
1827 " dm_nsegs: %d extra_frames: %d\n",
1828 DEVNAME(sc),
1829 hdr->mfh_sg_count,
1830 ccb->ccb_frame_size,
1831 sc->sc_frames_size,
1832 ccb->ccb_dmamap->dm_nsegs,
1833 ccb->ccb_extra_frames);
1834
1835 return 0;
1836}
1837
1838static int
1839mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
1840 uint32_t len, void *buf, uint8_t *mbox, bool poll)
1841{
1842 struct mfi_ccb *ccb;
1843 int rv = 1;
1844
1845 if ((ccb = mfi_get_ccb(sc)) == NULL)
1846 return rv;
1847 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
1848 if (rv)
1849 return rv;
1850
1851 if (poll) {
1852 rv = 1;
1853 if (mfi_poll(ccb))
1854 goto done;
1855 } else {
1856 mfi_post(sc, ccb);
1857
1858 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
1859 DEVNAME(sc));
1860 while (ccb->ccb_state != MFI_CCB_DONE)
1861 tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
1862
1863 if (ccb->ccb_flags & MFI_CCB_F_ERR)
1864 goto done;
1865 }
1866 rv = 0;
1867
1868done:
1869 mfi_put_ccb(ccb);
1870 return rv;
1871}
1872
1873static int
1874mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
1875 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
1876{
1877 struct mfi_dcmd_frame *dcmd;
1878
1879 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
1880
1881 dcmd = &ccb->ccb_frame->mfr_dcmd;
1882 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
1883 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1884 dcmd->mdf_header.mfh_timeout = 0;
1885
1886 dcmd->mdf_opcode = opc;
1887 dcmd->mdf_header.mfh_data_len = 0;
1888 ccb->ccb_direction = dir;
1889 ccb->ccb_xs = xs;
1890 ccb->ccb_done = mfi_mgmt_done;
1891
1892 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1893
1894 /* handle special opcodes */
1895 if (mbox)
1896 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);
1897
1898 if (dir != MFI_DATA_NONE) {
1899 dcmd->mdf_header.mfh_data_len = len;
1900 ccb->ccb_data = buf;
1901 ccb->ccb_len = len;
1902 ccb->ccb_sgl = &dcmd->mdf_sgl;
1903
1904 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
1905 return 1;
1906 }
1907 return 0;
1908}
1909
/*
 * Completion callback for DCMD management frames: tear down the data
 * DMA mapping, record errors in the ccb, and either finish the
 * associated xfer (cache-flush issued through the SCSI path) or wake
 * the thread sleeping in mfi_mgmt_internal().
 */
static void
mfi_mgmt_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK)
		ccb->ccb_flags |= MFI_CCB_F_ERR;

	/* mark done before waking the sleeper, which tests this state */
	ccb->ccb_state = MFI_CCB_DONE;
	if (xs) {
		if (hdr->mfh_cmd_status != MFI_STAT_OK) {
			xs->error = XS_DRIVER_STUFFUP;
		} else {
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
	} else
		wakeup(ccb);
}
1948
1949#if NBIO > 0
/*
 * bio(4) ioctl entry point: dispatch BIOC* commands to their handlers.
 * Takes the kernel lock and raises to splbio around the whole
 * operation (the handlers issue blocking management commands); the
 * two are released in reverse order on exit.
 */
int
mfi_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfi_softc *sc = device_private(dev);
	int error = 0;
	int s;

	KERNEL_LOCK(1, curlwp);
	s = splbio();

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFI_D_IOCTL, "inq\n");
		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFI_D_IOCTL, "vol\n");
		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFI_D_IOCTL, "disk\n");
		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFI_D_IOCTL, "alarm\n");
		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFI_D_IOCTL, "blink\n");
		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFI_D_IOCTL, "setstate\n");
		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
		error = EINVAL;
	}
	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
	return error;
}
2003
2004static int
2005mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
2006{
2007 struct mfi_conf *cfg;
2008 int rv = EINVAL;
2009
2010 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
2011
2012 if (mfi_get_info(sc)) {
2013 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
2014 DEVNAME(sc));
2015 return EIO;
2016 }
2017
2018 /* get figures */
2019 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2020 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2021 sizeof *cfg, cfg, NULL, false))
2022 goto freeme;
2023
2024 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
2025 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2026 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2027
2028 rv = 0;
2029freeme:
2030 free(cfg, M_DEVBUF);
2031 return rv;
2032}
2033
2034static int
2035mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
2036{
2037 int i, per, rv = EINVAL;
2038 uint8_t mbox[MFI_MBOX_SIZE];
2039
2040 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
2041 DEVNAME(sc), bv->bv_volid);
2042
2043 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
2044 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
2045 goto done;
2046
2047 i = bv->bv_volid;
2048 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2049 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
2050 DEVNAME(sc), mbox[0]);
2051
2052 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
2053 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false))
2054 goto done;
2055
2056 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
2057 /* go do hotspares */
2058 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
2059 goto done;
2060 }
2061
2062 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
2063
2064 switch(sc->sc_ld_list.mll_list[i].mll_state) {
2065 case MFI_LD_OFFLINE:
2066 bv->bv_status = BIOC_SVOFFLINE;
2067 break;
2068
2069 case MFI_LD_PART_DEGRADED:
2070 case MFI_LD_DEGRADED:
2071 bv->bv_status = BIOC_SVDEGRADED;
2072 break;
2073
2074 case MFI_LD_ONLINE:
2075 bv->bv_status = BIOC_SVONLINE;
2076 break;
2077
2078 default:
2079 bv->bv_status = BIOC_SVINVALID;
2080 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
2081 DEVNAME(sc),
2082 sc->sc_ld_list.mll_list[i].mll_state);
2083 }
2084
2085 /* additional status can modify MFI status */
2086 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
2087 case MFI_LD_PROG_CC:
2088 case MFI_LD_PROG_BGI:
2089 bv->bv_status = BIOC_SVSCRUB;
2090 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
2091 bv->bv_percent = (per * 100) / 0xffff;
2092 bv->bv_seconds =
2093 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
2094 break;
2095
2096 case MFI_LD_PROG_FGI:
2097 case MFI_LD_PROG_RECONSTRUCT:
2098 /* nothing yet */
2099 break;
2100 }
2101
2102 /*
2103 * The RAID levels are determined per the SNIA DDF spec, this is only
2104 * a subset that is valid for the MFI contrller.
2105 */
2106 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
2107 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
2108 MFI_DDF_SRL_SPANNED)
2109 bv->bv_level *= 10;
2110
2111 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
2112 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
2113
2114 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
2115
2116 rv = 0;
2117done:
2118 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
2119 DEVNAME(sc), rv);
2120 return rv;
2121}
2122
2123static int
2124mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
2125{
2126 struct mfi_conf *cfg;
2127 struct mfi_array *ar;
2128 struct mfi_ld_cfg *ld;
2129 struct mfi_pd_details *pd;
2130 struct scsipi_inquiry_data *inqbuf;
2131 char vend[8+16+4+1];
2132 int i, rv = EINVAL;
2133 int arr, vol, disk;
2134 uint32_t size;
2135 uint8_t mbox[MFI_MBOX_SIZE];
2136
2137 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
2138 DEVNAME(sc), bd->bd_diskid);
2139
2140 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2141
2142 /* send single element command to retrieve size for full structure */
2143 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2144 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2145 sizeof *cfg, cfg, NULL, false))
2146 goto freeme;
2147
2148 size = cfg->mfc_size;
2149 free(cfg, M_DEVBUF);
2150
2151 /* memory for read config */
2152 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2153 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2154 size, cfg, NULL, false))
2155 goto freeme;
2156
2157 ar = cfg->mfc_array;
2158
2159 /* calculate offset to ld structure */
2160 ld = (struct mfi_ld_cfg *)(
2161 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2162 cfg->mfc_array_size * cfg->mfc_no_array);
2163
2164 vol = bd->bd_volid;
2165
2166 if (vol >= cfg->mfc_no_ld) {
2167 /* do hotspares */
2168 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
2169 goto freeme;
2170 }
2171
2172 /* find corresponding array for ld */
2173 for (i = 0, arr = 0; i < vol; i++)
2174 arr += ld[i].mlc_parm.mpa_span_depth;
2175
2176 /* offset disk into pd list */
2177 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
2178
2179 /* offset array index into the next spans */
2180 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
2181
2182 bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
2183 switch (ar[arr].pd[disk].mar_pd_state){
2184 case MFI_PD_UNCONFIG_GOOD:
2185 bd->bd_status = BIOC_SDUNUSED;
2186 break;
2187
2188 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
2189 bd->bd_status = BIOC_SDHOTSPARE;
2190 break;
2191
2192 case MFI_PD_OFFLINE:
2193 bd->bd_status = BIOC_SDOFFLINE;
2194 break;
2195
2196 case MFI_PD_FAILED:
2197 bd->bd_status = BIOC_SDFAILED;
2198 break;
2199
2200 case MFI_PD_REBUILD:
2201 bd->bd_status = BIOC_SDREBUILD;
2202 break;
2203
2204 case MFI_PD_ONLINE:
2205 bd->bd_status = BIOC_SDONLINE;
2206 break;
2207
2208 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
2209 default:
2210 bd->bd_status = BIOC_SDINVALID;
2211 break;
2212
2213 }
2214
2215 /* get the remaining fields */
2216 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
2217 memset(pd, 0, sizeof(*pd));
2218 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2219 sizeof *pd, pd, mbox, false))
2220 goto freeme;
2221
2222 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
2223
2224 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
2225 bd->bd_channel = pd->mpd_enc_idx;
2226
2227 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2228 memcpy(vend, inqbuf->vendor, sizeof vend - 1);
2229 vend[sizeof vend - 1] = '\0';
2230 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
2231
2232 /* XXX find a way to retrieve serial nr from drive */
2233 /* XXX find a way to get bd_procdev */
2234
2235 rv = 0;
2236freeme:
2237 free(pd, M_DEVBUF);
2238 free(cfg, M_DEVBUF);
2239
2240 return rv;
2241}
2242
2243static int
2244mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
2245{
2246 uint32_t opc, dir = MFI_DATA_NONE;
2247 int rv = 0;
2248 int8_t ret;
2249
2250 switch(ba->ba_opcode) {
2251 case BIOC_SADISABLE:
2252 opc = MR_DCMD_SPEAKER_DISABLE;
2253 break;
2254
2255 case BIOC_SAENABLE:
2256 opc = MR_DCMD_SPEAKER_ENABLE;
2257 break;
2258
2259 case BIOC_SASILENCE:
2260 opc = MR_DCMD_SPEAKER_SILENCE;
2261 break;
2262
2263 case BIOC_GASTATUS:
2264 opc = MR_DCMD_SPEAKER_GET;
2265 dir = MFI_DATA_IN;
2266 break;
2267
2268 case BIOC_SATEST:
2269 opc = MR_DCMD_SPEAKER_TEST;
2270 break;
2271
2272 default:
2273 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
2274 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
2275 return EINVAL;
2276 }
2277
2278 if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
2279 rv = EINVAL;
2280 else
2281 if (ba->ba_opcode == BIOC_GASTATUS)
2282 ba->ba_status = ret;
2283 else
2284 ba->ba_status = 0;
2285
2286 return rv;
2287}
2288
2289static int
2290mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
2291{
2292 int i, found, rv = EINVAL;
2293 uint8_t mbox[MFI_MBOX_SIZE];
2294 uint32_t cmd;
2295 struct mfi_pd_list *pd;
2296
2297 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
2298 bb->bb_status);
2299
2300 /* channel 0 means not in an enclosure so can't be blinked */
2301 if (bb->bb_channel == 0)
2302 return EINVAL;
2303
2304 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2305
2306 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2307 MFI_PD_LIST_SIZE, pd, NULL, false))
2308 goto done;
2309
2310 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2311 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
2312 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
2313 found = 1;
2314 break;
2315 }
2316
2317 if (!found)
2318 goto done;
2319
2320 memset(mbox, 0, sizeof mbox);
2321
2322 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2323
2324 switch (bb->bb_status) {
2325 case BIOC_SBUNBLINK:
2326 cmd = MR_DCMD_PD_UNBLINK;
2327 break;
2328
2329 case BIOC_SBBLINK:
2330 cmd = MR_DCMD_PD_BLINK;
2331 break;
2332
2333 case BIOC_SBALARM:
2334 default:
2335 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
2336 "opcode %x\n", DEVNAME(sc), bb->bb_status);
2337 goto done;
2338 }
2339
2340
2341 if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
2342 goto done;
2343
2344 rv = 0;
2345done:
2346 free(pd, M_DEVBUF);
2347 return rv;
2348}
2349
2350static int
2351mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2352{
2353 struct mfi_pd_list *pd;
2354 int i, found, rv = EINVAL;
2355 uint8_t mbox[MFI_MBOX_SIZE];
2356
2357 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2358 bs->bs_status);
2359
2360 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);
2361
2362 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2363 MFI_PD_LIST_SIZE, pd, NULL, false))
2364 goto done;
2365
2366 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2367 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2368 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2369 found = 1;
2370 break;
2371 }
2372
2373 if (!found)
2374 goto done;
2375
2376 memset(mbox, 0, sizeof mbox);
2377
2378 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
2379
2380 switch (bs->bs_status) {
2381 case BIOC_SSONLINE:
2382 mbox[2] = MFI_PD_ONLINE;
2383 break;
2384
2385 case BIOC_SSOFFLINE:
2386 mbox[2] = MFI_PD_OFFLINE;
2387 break;
2388
2389 case BIOC_SSHOTSPARE:
2390 mbox[2] = MFI_PD_HOTSPARE;
2391 break;
2392/*
2393 case BIOC_SSREBUILD:
2394 break;
2395*/
2396 default:
2397 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2398 "opcode %x\n", DEVNAME(sc), bs->bs_status);
2399 goto done;
2400 }
2401
2402
2403 if (mfi_mgmt_internal(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE,
2404 0, NULL, mbox, false))
2405 goto done;
2406
2407 rv = 0;
2408done:
2409 free(pd, M_DEVBUF);
2410 return rv;
2411}
2412
2413static int
2414mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2415{
2416 struct mfi_conf *cfg;
2417 struct mfi_hotspare *hs;
2418 struct mfi_pd_details *pd;
2419 struct bioc_disk *sdhs;
2420 struct bioc_vol *vdhs;
2421 struct scsipi_inquiry_data *inqbuf;
2422 char vend[8+16+4+1];
2423 int i, rv = EINVAL;
2424 uint32_t size;
2425 uint8_t mbox[MFI_MBOX_SIZE];
2426
2427 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2428
2429 if (!bio_hs)
2430 return EINVAL;
2431
2432 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
2433
2434 /* send single element command to retrieve size for full structure */
2435 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2436 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2437 sizeof *cfg, cfg, NULL, false))
2438 goto freeme;
2439
2440 size = cfg->mfc_size;
2441 free(cfg, M_DEVBUF);
2442
2443 /* memory for read config */
2444 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2445 if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
2446 size, cfg, NULL, false))
2447 goto freeme;
2448
2449 /* calculate offset to hs structure */
2450 hs = (struct mfi_hotspare *)(
2451 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2452 cfg->mfc_array_size * cfg->mfc_no_array +
2453 cfg->mfc_ld_size * cfg->mfc_no_ld);
2454
2455 if (volid < cfg->mfc_no_ld)
2456 goto freeme; /* not a hotspare */
2457
2458 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
2459 goto freeme; /* not a hotspare */
2460
2461 /* offset into hotspare structure */
2462 i = volid - cfg->mfc_no_ld;
2463
2464 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2465 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2466 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2467
2468 /* get pd fields */
2469 memset(mbox, 0, sizeof mbox);
2470 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
2471 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2472 sizeof *pd, pd, mbox, false)) {
2473 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2474 DEVNAME(sc));
2475 goto freeme;
2476 }
2477
2478 switch (type) {
2479 case MFI_MGMT_VD:
2480 vdhs = bio_hs;
2481 vdhs->bv_status = BIOC_SVONLINE;
2482 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
2483 vdhs->bv_level = -1; /* hotspare */
2484 vdhs->bv_nodisk = 1;
2485 break;
2486
2487 case MFI_MGMT_SD:
2488 sdhs = bio_hs;
2489 sdhs->bd_status = BIOC_SDHOTSPARE;
2490 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
2491 sdhs->bd_channel = pd->mpd_enc_idx;
2492 sdhs->bd_target = pd->mpd_enc_slot;
2493 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
2494 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
2495 vend[sizeof vend - 1] = '\0';
2496 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2497 break;
2498
2499 default:
2500 goto freeme;
2501 }
2502
2503 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2504 rv = 0;
2505freeme:
2506 free(pd, M_DEVBUF);
2507 free(cfg, M_DEVBUF);
2508
2509 return rv;
2510}
2511
2512static int
2513mfi_destroy_sensors(struct mfi_softc *sc)
2514{
2515 if (sc->sc_sme == NULL)
2516 return 0;
2517 sysmon_envsys_unregister(sc->sc_sme);
2518 sc->sc_sme = NULL;
2519 free(sc->sc_sensor, M_DEVBUF);
2520 return 0;
2521}
2522
2523static int
2524mfi_create_sensors(struct mfi_softc *sc)
2525{
2526 int i;
2527 int nsensors = sc->sc_ld_cnt + 1;
2528 int rv;
2529
2530 sc->sc_sme = sysmon_envsys_create();
2531 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
2532 M_DEVBUF, M_NOWAIT | M_ZERO);
2533 if (sc->sc_sensor == NULL) {
2534 aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n");
2535 return ENOMEM;
2536 }
2537
2538 /* BBU */
2539 sc->sc_sensor[0].units = ENVSYS_INDICATOR;
2540 sc->sc_sensor[0].state = ENVSYS_SINVALID;
2541 sc->sc_sensor[0].value_cur = 0;
2542 /* Enable monitoring for BBU state changes, if present */
2543 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
2544 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
2545 snprintf(sc->sc_sensor[0].desc,
2546 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
2547 if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
2548 goto out;
2549
2550 for (i = 1; i < nsensors; i++) {
2551 sc->sc_sensor[i].units = ENVSYS_DRIVE;
2552 sc->sc_sensor[i].state = ENVSYS_SINVALID;
2553 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
2554 /* Enable monitoring for drive state changes */
2555 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
2556 /* logical drives */
2557 snprintf(sc->sc_sensor[i].desc,
2558 sizeof(sc->sc_sensor[i].desc), "%s:%d",
2559 DEVNAME(sc), i - 1);
2560 if (sysmon_envsys_sensor_attach(sc->sc_sme,
2561 &sc->sc_sensor[i]))
2562 goto out;
2563 }
2564
2565 sc->sc_sme->sme_name = DEVNAME(sc);
2566 sc->sc_sme->sme_cookie = sc;
2567 sc->sc_sme->sme_refresh = mfi_sensor_refresh;
2568 rv = sysmon_envsys_register(sc->sc_sme);
2569 if (rv != 0) {
2570 aprint_error_dev(sc->sc_dev,
2571 "unable to register with sysmon (rv = %d)\n", rv);
2572 goto out;
2573 }
2574 return 0;
2575
2576out:
2577 free(sc->sc_sensor, M_DEVBUF);
2578 sysmon_envsys_destroy(sc->sc_sme);
2579 sc->sc_sme = NULL;
2580 return EINVAL;
2581}
2582
/*
 * envsys refresh callback.  Sensor 0 reports BBU health; sensors
 * 1..sc_ld_cnt report the state of the corresponding logical volume
 * by reusing the BIOCVOL ioctl backend.
 */
static void
mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct mfi_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;
	int s;
	int error;

	/* ignore sensor numbers beyond what mfi_create_sensors() set up */
	if (edata->sensor >= sc->sc_ld_cnt + 1)
		return;

	if (edata->sensor == 0) {
		/* BBU */
		struct mfi_bbu_status bbu_stat;
		int bbu_status;
		/* nothing to report if the adapter has no BBU fitted */
		if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
			return;

		/*
		 * Firmware query must run with the kernel lock held and
		 * bio interrupts blocked.
		 */
		KERNEL_LOCK(1, curlwp);
		s = splbio();
		bbu_status = mfi_get_bbu(sc, &bbu_stat);
		splx(s);
		KERNEL_UNLOCK_ONE(curlwp);
		switch(bbu_status) {
		case MFI_BBU_GOOD:
			edata->value_cur = 1;
			edata->state = ENVSYS_SVALID;
			/* log only on a state transition, not every poll */
			if (!sc->sc_bbuok)
				aprint_normal_dev(sc->sc_dev,
				    "BBU state changed to good\n");
			sc->sc_bbuok = true;
			break;
		case MFI_BBU_BAD:
			edata->value_cur = 0;
			edata->state = ENVSYS_SCRITICAL;
			if (sc->sc_bbuok)
				aprint_normal_dev(sc->sc_dev,
				    "BBU state changed to bad\n");
			sc->sc_bbuok = false;
			break;
		case MFI_BBU_UNKNOWN:
		default:
			edata->value_cur = 0;
			edata->state = ENVSYS_SINVALID;
			sc->sc_bbuok = false;
			break;
		}
		return;
	}

	/* logical volume sensors: sensor N maps to volume N - 1 */
	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->sensor - 1;
	KERNEL_LOCK(1, curlwp);
	s = splbio();
	error = mfi_ioctl_vol(sc, &bv);
	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);
	if (error)
		bv.bv_status = BIOC_SVINVALID;

	bio_vol_to_envsys(edata, &bv);
}
2645
2646#endif /* NBIO > 0 */
2647
/* xscale: firmware state is published in outbound message register 0. */
static uint32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OMSG0);
}
2653
/* xscale: disable interrupt delivery by clearing the outbound mask. */
static void
mfi_xscale_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0);
}
2659
/* xscale: enable interrupt delivery via the outbound mask register. */
static void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2665
/*
 * xscale: check whether this interrupt is ours and acknowledge it.
 * Returns 1 when handled, 0 when the interrupt was not for us.
 */
static int
mfi_xscale_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);
	return 1;
}
2679
/*
 * xscale: hand a command frame to the firmware.  The frame and sense
 * DMA memory are synced for device access first, then the frame's
 * physical address (shifted, with the extra-frame count in the low
 * bits) is written to the inbound queue port.
 */
static void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2694
/* ppc: firmware state is published in the outbound scratch pad. */
static uint32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}
2700
/* ppc: mask all interrupts and clear any pending doorbell state. */
static void
mfi_ppc_intr_dis(struct mfi_softc *sc)
{
	/* Taking a wild guess --dyoung */
	mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}
2708
/* ppc: clear stale doorbell state, then unmask the interrupt bits. */
static void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2715
/*
 * ppc: check whether this interrupt is ours and acknowledge it via the
 * outbound doorbell clear register.  Returns 1 when handled, else 0.
 */
static int
mfi_ppc_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);
	return 1;
}
2729
/*
 * ppc: post a command frame — physical address with bit 0 set and the
 * extra-frame count encoded in the next bits, written to the inbound
 * queue port.
 */
static void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2737
/* gen2: firmware state is published in the outbound scratch pad. */
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2743
/* gen2: mask all interrupts and clear any pending doorbell state. */
void
mfi_gen2_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0xffffffff);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}
2750
/* gen2: clear stale doorbell state, then unmask the valid-intr bit. */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2757
/*
 * gen2: check whether this interrupt is ours and acknowledge it via
 * the outbound doorbell clear register.  Returns 1 when handled.
 */
int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}
2772
/*
 * gen2: post a command frame — same inbound queue port encoding as the
 * ppc variant (address | 1, extra frames << 1).
 */
void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2780
/* skinny: firmware state is published in the outbound scratch pad. */
u_int32_t
mfi_skinny_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2786
/* skinny: disable interrupt delivery by clearing the outbound mask. */
void
mfi_skinny_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0);
}
2792
/* skinny: unmask bit 0 of the outbound interrupt mask register. */
void
mfi_skinny_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~0x00000001);
}
2798
/*
 * skinny: check whether this interrupt is ours and acknowledge it.
 * Note the ack goes to OSTS (not ODC as on ppc/gen2).  Returns 1 when
 * handled, 0 otherwise.
 */
int
mfi_skinny_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}
2813
/*
 * skinny: post a command frame via the 64-bit inbound queue port pair;
 * the high half is always zero here (frames live in low memory).
 */
void
mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	mfi_write(sc, MFI_IQPH, 0x00000000);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2822
2823#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
2824
/*
 * tbolt: unmask the fusion interrupt bit; the mask register is read
 * back afterwards, presumably to flush the posted write.
 */
void
mfi_tbolt_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	mfi_read(sc, MFI_OMSK);
}
2831
/*
 * tbolt: mask all interrupts; the mask register is read back
 * afterwards, presumably to flush the posted write.
 */
void
mfi_tbolt_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
	mfi_read(sc, MFI_OMSK);
}
2838
/*
 * tbolt: check whether this interrupt is ours.  Bit 0 of OSTS flags a
 * doorbell-style event which is acknowledged by writing the status
 * back; state-change interrupts are reported as not handled (return 0)
 * so no reply-queue processing happens for them.  Otherwise the fusion
 * reply interrupt bit decides.  Returns 1 when handled.
 */
int
mfi_tbolt_intr(struct mfi_softc *sc)
{
	int32_t status;

	status = mfi_read(sc, MFI_OSTS);

	if (ISSET(status, 0x1)) {
		mfi_write(sc, MFI_OSTS, status);
		/* read back, presumably to flush the posted ack */
		mfi_read(sc, MFI_OSTS);
		if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
			return 0;
		return 1;
	}
	if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;
	mfi_read(sc, MFI_OSTS);
	return 1;
}
2858
/* tbolt: firmware state is published in the outbound scratch pad. */
u_int32_t
mfi_tbolt_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}
2864
/*
 * tbolt: post a command.  Once the MPT interface is up (sc_MFA_enabled)
 * legacy MFI frames are wrapped in an MPT pass-through descriptor and
 * the 64-bit request descriptor is written to the inbound queue port
 * pair.  Before that, the raw frame address is posted with the MFA
 * descriptor type encoded in the low flag bits.
 */
void
mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	if (sc->sc_MFA_enabled) {
		/* wrap plain MFI frames in an MPT pass-through once */
		if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
			mfi_tbolt_build_mpt_ccb(ccb);
		mfi_write(sc, MFI_IQPL,
		    ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
		mfi_write(sc, MFI_IQPH,
		    ccb->ccb_tb_request_desc.words >> 32);
		ccb->ccb_state = MFI_CCB_RUNNING;
		return;
	}
	uint64_t bus_add = ccb->ccb_pframe;
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	mfi_write(sc, MFI_IQPL, bus_add);
	mfi_write(sc, MFI_IQPH, bus_add >> 32);
	ccb->ccb_state = MFI_CCB_RUNNING;
}
2885
/*
 * Wrap a legacy MFI frame into an MPT pass-through request so the
 * Thunderbolt firmware can consume it: the MPT I/O request carries a
 * single IEEE chain element pointing at the original frame.
 */
static void
mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
{
	union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
	struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
	struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGL offsets are expressed in 32-bit and 128-bit units */
	io_req->SGLOffset0 =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
	io_req->ChainOffset =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;

	/* the chain element points at the original legacy MFI frame */
	mpi25_ieee_chain =
	    (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
	mpi25_ieee_chain->Address = ccb->ccb_pframe;

	/*
	 In MFI pass thru, nextChainOffset will always be zero to
	 indicate the end of the chain.
	*/
	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	ccb->ccb_flags |= MFI_CCB_F_TBOLT;
	/* make the MPT message visible to the device before posting */
	bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
	    MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    ccb->ccb_tb_pio_request -
	    MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2923
2924/*
2925 * Description:
2926 * This function will prepare message pools for the Thunderbolt controller
2927 */
/*
 * Carve the pre-allocated sc_tbolt_reqmsgpool DMA area into its three
 * regions: request messages, reply frames (initialized to all-ones so
 * unused descriptors are recognizable), and SG chain frames.  Records
 * the bus addresses of the reply and SG regions in the softc.
 * Always returns 0.
 */
static int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
{
	uint32_t offset = 0;
	uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);

	/* Request Decriptors alignment restrictions */
	KASSERT(((uintptr_t)addr & 0xFF) == 0);

	/* Skip request message pool */
	addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];

	/* Reply Frame Pool is initialized */
	sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	KASSERT(((uintptr_t)addr & 0xFF) == 0);

	offset = (uintptr_t)sc->sc_reply_frame_pool
	    - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
	sc->sc_reply_frame_busaddr =
	    MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
	    (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));

	/* Skip Reply Frame Pool */
	addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
	sc->sc_reply_pool_limit = (void *)addr;

	offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
	sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;

	/* initialize the last_reply_idx to 0 */
	sc->sc_last_reply_idx = 0;
	/* sanity: all three regions must fit inside the allocation */
	offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
	KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
2970
2971/*
2972 * This routine prepare and issue INIT2 frame to the Firmware
2973 */
2974
/*
 * Build and send the MPI2 IOC_INIT request wrapped in a legacy MFI
 * INIT frame, telling the firmware where the request message pool and
 * reply descriptor queue live.  On success sets sc_MFA_enabled and
 * starts the LD-sync workqueue.  Returns 0 on success, nonzero on
 * failure (EBUSY when no ccb is available, otherwise 1).
 */
static int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct mpi2_ioc_init_request *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	struct mfi_ccb *ccb;
	bus_addr_t phyAddress;
	mfi_address *mfiAddressTemp;
	int s;
	char *verbuf;
	char wqbuf[10];

	/* Check if initialization is already completed */
	if (sc->sc_MFA_enabled) {
		return 1;
	}

	mpi2IocInit =
	    (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);

	s = splbio();
	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		splx(s);
		return (EBUSY);
	}


	mfi_init = &ccb->ccb_frame->mfr_init;

	memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	/* frame size is expressed in 32-bit words */
	mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth =
	    (uint16_t)sc->sc_reply_pool_size;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	phyAddress = sc->sc_reply_frame_busaddr;
	mfiAddressTemp =
	    (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
	mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	/* pass the driver version string to the firmware via DMA */
	verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
	snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
	    MEGASAS_VERSION);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
	    MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
	mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
	mfi_init->driver_ver_hi =
	    htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
	    MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init);
	mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
	mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);

	mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
	mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
	/* synchronous: poll the legacy frame for completion */
	if (mfi_poll(ccb) != 0) {
		aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
		    "command at 0x%" PRIx64 "\n",
		    (uint64_t)ccb->ccb_pframe);
		splx(s);
		return 1;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
	    MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
	    MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	mfi_put_ccb(ccb);
	splx(s);

	/*
	 * NOTE(review): mfi_init points into the ccb frame and is read
	 * after mfi_put_ccb() above — the memory stays mapped but the
	 * ccb may be reused; verify ordering.
	 */
	if (mfi_init->mif_header.mfh_cmd_status == 0) {
		sc->sc_MFA_enabled = 1;
	}
	else {
		aprint_error_dev(sc->sc_dev, "Init command Failed %x\n",
		    mfi_init->mif_header.mfh_cmd_status);
		return 1;
	}

	snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
	if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
	    sc, PRIBIO, IPL_BIO, 0) != 0) {
		aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
		return 1;
	}
	/* kick off an initial LD map sync */
	workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
	return 0;
}
3084
/*
 * Thunderbolt interrupt handler: drain the reply descriptor queue,
 * completing each ccb named by the descriptor's SMID, then tell the
 * firmware how far we got via the reply post index register.
 * Returns 1 when any work was done, 0 otherwise.
 */
int
mfi_tbolt_intrh(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_ccb *ccb;
	union mfi_mpi2_reply_descriptor *desc;
	int smid, num_completed;

	if (!mfi_tbolt_intr(sc))
		return 0;

	DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)sc->sc_last_reply_idx);

	KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);

	desc = (union mfi_mpi2_reply_descriptor *)
	    ((uintptr_t)sc->sc_reply_frame_pool +
	     sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);

	/* pull the reply region from the device before reading it */
	bus_dmamap_sync(sc->sc_dmat,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
	    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	num_completed = 0;
	/* unused descriptors were pre-filled with all-ones */
	while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
	    MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		smid = desc->header.SMID;
		/* SMIDs are 1-based; 0 is reserved */
		KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
		ccb = &sc->sc_ccb[smid - 1];
		DNPRINTF(MFI_D_INTR,
		    "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
		    "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
		    sc->sc_last_reply_idx, desc->words, ccb);
		KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
		/* sync the SG chain frame if this I/O used one */
		if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
		    ccb->ccb_tb_io_request->ChainOffset != 0) {
			bus_dmamap_sync(sc->sc_dmat,
			    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
			    ccb->ccb_tb_psg_frame -
				MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
			    MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD);
		}
		if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
			bus_dmamap_sync(sc->sc_dmat,
			    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
			    ccb->ccb_tb_pio_request -
				MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
			    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (ccb->ccb_done)
			ccb->ccb_done(ccb);
		else
			ccb->ccb_state = MFI_CCB_DONE;
		/* the reply queue is a ring; wrap the index */
		sc->sc_last_reply_idx++;
		if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
			sc->sc_last_reply_idx = 0;
		}
		/* mark the slot unused again for the next pass */
		desc->words = ~0x0;
		/* Get the next reply descriptor */
		desc = (union mfi_mpi2_reply_descriptor *)
		    ((uintptr_t)sc->sc_reply_frame_pool +
		     sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
		num_completed++;
	}
	if (num_completed == 0)
		return 0;

	/* hand the cleared descriptors back to the device */
	bus_dmamap_sync(sc->sc_dmat,
	    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
	    MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* inform firmware of our consumer index */
	mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
	return 1;
}
3163
3164
/*
 * Build a Thunderbolt LD I/O request for a read/write scsipi transfer
 * of blockcnt blocks at blockno.  Fills the MPT raid-scsi-io message,
 * builds the SG list and syncs the message for the device.  Returns 0
 * on success, 1 on failure (no data buffer or SG build failed).
 */
int
mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint64_t blockno, uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	int sge_count;

	DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	ccb->ccb_done = mfi_tbolt_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	io_req = ccb->ccb_tb_io_request;

	/* Just the CDB length,rest of the Flags are zero */
	io_req->IoFlags = xs->cmdlen;
	memset(io_req->CDB.CDB32, 0, 32);
	memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);

	io_req->RaidContext.TargetID = periph->periph_target;
	io_req->RaidContext.Status = 0;
	io_req->RaidContext.exStatus = 0;
	io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_req->DevHandle = periph->periph_target;

	ccb->ccb_tb_request_desc.header.RequestFlags =
	    (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	io_req->DataLength = blockcnt * MFI_SECTOR_LEN;

	/* set transfer direction from the scsipi control flags */
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io_req->Control = MPI2_SCSIIO_CONTROL_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}

	/* map the data buffer; may sleep unless the xfer forbids it */
	sge_count = mfi_tbolt_create_sgl(ccb,
	    (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK
	    );
	if (sge_count < 0)
		return 1;
	KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
	io_req->RaidContext.numSGE = sge_count;
	io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	io_req->SGLOffset0 =
	    offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
	io_req->SenseBufferLength = MFI_SENSE_SIZE;

	ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
	/* make the request message visible to the device */
	bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
	    MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    ccb->ccb_tb_pio_request -
		MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
3235
3236
3237static void
3238mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
3239{
3240 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
3241 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
3242 io_req->RaidContext.exStatus);
3243}
3244
/*
 * Load ccb->ccb_data into the data DMA map and build the IEEE 64-bit SGL
 * for a ThunderBolt request.  The first
 * MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 segments (or all of them, if
 * they fit) go into the main message frame; any remainder spills into the
 * per-ccb chain frame referenced by a chain element.  Returns the total
 * number of DMA segments, or -1 when there is no data or the map load
 * fails.
 */
static int
mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	bus_dma_segment_t *sgd;
	int error, i, sge_idx, sge_count;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mpi25_ieee_sge_chain64 *sgl_ptr;

	DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
	    (u_long)ccb->ccb_data);

	if (!ccb->ccb_data)
		return -1;

	/* BUS_DMA_WAITOK may sleep; it must not be used in interrupt context. */
	KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
	error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG)
			aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			aprint_error_dev(sc->sc_dev,
			    "error %d loading dma map\n", error);
		return -1;
	}

	io_req = ccb->ccb_tb_io_request;
	sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
	sge_count = ccb->ccb_dmamap->dm_nsegs;
	sgd = ccb->ccb_dmamap->dm_segs;
	KASSERT(sge_count <= sc->sc_max_sgl);
	KASSERT(sge_count <=
	    (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
	     MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));

	if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
		/* One element to store the chain info */
		sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
		DNPRINTF(MFI_D_DMA,
		    "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
		    sge_idx, sge_count, ccb->ccb_tb_pio_request);
	} else {
		/* Everything fits in the main message. */
		sge_idx = sge_count;
	}

	/* Fill the SGL entries that live in the main message frame. */
	for (i = 0; i < sge_idx; i++) {
		sgl_ptr->Address = htole64(sgd[i].ds_addr);
		sgl_ptr->Length = htole32(sgd[i].ds_len);
		sgl_ptr->Flags = 0;
		/*
		 * NOTE(review): this condition is loop-invariant — it only
		 * logs main-message entries when a chain will be used,
		 * presumably to reduce noise; confirm that was the intent.
		 */
		if (sge_idx < sge_count) {
			DNPRINTF(MFI_D_DMA,
			    "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
			    " flags 0x%x\n", sgl_ptr, i,
			    sgl_ptr->Address, sgl_ptr->Length,
			    sgl_ptr->Flags);
		}
		sgl_ptr++;
	}
	io_req->ChainOffset = 0;
	if (sge_idx < sge_count) {
		/* Remaining segments go into the chain frame. */
		struct mpi25_ieee_sge_chain64 *sg_chain;
		io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
		sg_chain->Length = (sizeof(mpi2_sge_io_union) *
		    (sge_count - sge_idx));
		sg_chain->Address = ccb->ccb_tb_psg_frame;
		DNPRINTF(MFI_D_DMA,
		    "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
		    " flags 0x%x\n", sg_chain, sg_chain->Address,
		    sg_chain->Length, sg_chain->Flags);
		sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
		for (; i < sge_count; i++) {
			sgl_ptr->Address = htole64(sgd[i].ds_addr);
			sgl_ptr->Length = htole32(sgd[i].ds_len);
			sgl_ptr->Flags = 0;
			DNPRINTF(MFI_D_DMA,
			    "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
			    " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
			    sgl_ptr->Length, sgl_ptr->Flags);
			sgl_ptr++;
		}
		/*
		 * NOTE(review): the chain frame was just written by the CPU
		 * and is read by the controller, which suggests
		 * BUS_DMASYNC_PREWRITE rather than PREREAD — confirm against
		 * bus_dma(9) before changing.
		 */
		bus_dmamap_sync(sc->sc_dmat,
		    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
		    ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
		    MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
	}

	/* Sync the data buffer for the direction the device will use. */
	if (ccb->ccb_direction == MFI_DATA_IN) {
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
	return sge_count;
}
3347
3348/*
3349 * The ThunderBolt HW has an option for the driver to directly
3350 * access the underlying disks and operate on the RAID. To
3351 * do this there needs to be a capability to keep the RAID controller
3352 * and driver in sync. The FreeBSD driver does not take advantage
3353 * of this feature since it adds a lot of complexity and slows down
3354 * performance. Performance is gained by using the controller's
3355 * cache etc.
3356 *
3357 * Even though this driver doesn't access the disks directly, an
3358 * AEN like command is used to inform the RAID firmware to "sync"
3359 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
3360 * command in write mode will return when the RAID firmware has
3361 * detected a change to the RAID state. Examples of this type
3362 * of change are removing a disk. Once the command returns then
3363 * the driver needs to acknowledge this and "sync" all LD's again.
3364 * This repeats until we shutdown. Then we need to cancel this
3365 * pending command.
3366 *
3367 * If this is not done right the RAID firmware will not remove a
3368 * pulled drive and the RAID won't go degraded etc. Effectively,
 * stopping any RAID management functions.
3370 *
3371 * Doing another LD sync, requires the use of an event since the
3372 * driver needs to do a mfi_wait_command and can't do that in an
3373 * interrupt thread.
3374 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command.  That requires a bunch of structure and it is simpler to just
 * do the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
3378 */
3379
/*
 * Workqueue handler: arm (or re-arm) the pending MR_DCMD_LD_MAP_GET_INFO
 * "sync map" command described in the block comment above.  Snapshots the
 * current LD list, hands it to the firmware in write mode, and leaves the
 * command outstanding; completion is handled by mfi_sync_map_complete().
 * On any failure the whole sequence is retried after a short pause.
 */
void
mfi_tbolt_sync_map_info(struct work *w, void *v)
{
	struct mfi_softc *sc = v;
	int i;
	struct mfi_ccb *ccb = NULL;
	uint8_t mbox[MFI_MBOX_SIZE];
	struct mfi_ld *ld_sync = NULL;
	size_t ld_size;
	int s;

	DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
again:
	s = splbio();
	/* A non-NULL sc_ldsync_ccb means a sync command is already pending. */
	if (sc->sc_ldsync_ccb != NULL) {
		splx(s);
		return;
	}

	/* Fetch the current logical-drive list from the firmware. */
	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
		aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
		goto err;
	}

	ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;

	/* Buffer ownership passes to the ccb on success (freed on completion). */
	ld_sync = malloc(ld_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ld_sync == NULL) {
		aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
		goto err;
	}
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
		goto err;
	}
	sc->sc_ldsync_ccb = ccb;

	memset(mbox, 0, MFI_MBOX_SIZE);
	/* mbox[0]: number of LDs; mbox[1]: leave the command pending in FW. */
	mbox[0] = sc->sc_ld_list.mll_no_ld;
	mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
	if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
	    ld_size, ld_sync, mbox)) {
		aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
		goto err;
	}
	/*
	 * we won't sleep on this command, so we have to override
	 * the callback set up by mfi_mgmt()
	 */
	ccb->ccb_done = mfi_sync_map_complete;

	mfi_post(sc, ccb);
	splx(s);
	return;

err:
	/* Undo any partial setup, then retry after roughly one second. */
	if (ld_sync)
		free(ld_sync, M_DEVBUF);
	if (ccb)
		mfi_put_ccb(ccb);
	sc->sc_ldsync_ccb = NULL;
	splx(s);
	kpause("ldsyncp", 0, hz, NULL);
	goto again;
}
3450
/*
 * Completion handler for the pending sync-map command: the firmware has
 * detected a RAID state change (or the command failed/was aborted).
 * Releases the command's resources and, unless the driver is shutting
 * down or the command errored, queues another round via the workqueue so
 * the next state change is caught too.
 */
static void
mfi_sync_map_complete(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	/* Driver shutdown cancels the re-arm cycle. */
	bool aborted = !sc->sc_running;

	DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
	    DEVNAME(ccb->ccb_sc));
	KASSERT(sc->sc_ldsync_ccb == ccb);
	mfi_mgmt_done(ccb);
	/* Free the LD snapshot allocated by mfi_tbolt_sync_map_info(). */
	free(ccb->ccb_data, M_DEVBUF);
	if (ccb->ccb_flags & MFI_CCB_F_ERR) {
		aprint_error_dev(sc->sc_dev, "sync command failed\n");
		aborted = true;
	}
	mfi_put_ccb(ccb);
	/* Clearing this allows mfi_tbolt_sync_map_info() to post again. */
	sc->sc_ldsync_ccb = NULL;

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
	}
}
3474
3475static int
3476mfifopen(dev_t dev, int flag, int mode, struct lwp *l)
3477{
3478 struct mfi_softc *sc;
3479
3480 if ((sc = device_lookup_private(&mfi_cd, minor(dev))) == NULL)
3481 return (ENXIO);
3482 return (0);
3483}
3484
3485static int
3486mfifclose(dev_t dev, int flag, int mode, struct lwp *l)
3487{
3488 return (0);
3489}
3490
3491static int
3492mfifioctl(dev_t dev, u_long cmd, void *data, int flag,
3493 struct lwp *l)
3494{
3495 struct mfi_softc *sc;
3496 struct mfi_ioc_packet *ioc = data;
3497 uint8_t *udata;
3498 struct mfi_ccb *ccb = NULL;
3499 int ctx, i, s, error;
3500 union mfi_sense_ptr sense_ptr;
3501
3502 switch(cmd) {
3503 case MFI_CMD:
3504 sc = device_lookup_private(&mfi_cd, ioc->mfi_adapter_no);
3505 break;
3506 default:
3507 return ENOTTY;
3508 }
3509 if (sc == NULL)
3510 return (ENXIO);
3511 if (sc->sc_opened)
3512 return (EBUSY);
3513
3514 switch(cmd) {
3515 case MFI_CMD:
3516 error = kauth_authorize_device_passthru(l->l_cred, dev,
3517 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
3518 if (error)
3519 return error;
3520 if (ioc->mfi_sge_count > MAX_IOCTL_SGE)
3521 return EINVAL;
3522 s = splbio();
3523 if ((ccb = mfi_get_ccb(sc)) == NULL)
3524 return ENOMEM;
3525 ccb->ccb_data = NULL;
3526 ctx = ccb->ccb_frame->mfr_header.mfh_context;
3527 memcpy(ccb->ccb_frame, ioc->mfi_frame.raw,
3528 sizeof(*ccb->ccb_frame));
3529 ccb->ccb_frame->mfr_header.mfh_context = ctx;
3530 ccb->ccb_frame->mfr_header.mfh_scsi_status = 0;
3531 ccb->ccb_frame->mfr_header.mfh_pad0 = 0;
3532 ccb->ccb_frame_size =
3533 (sizeof(union mfi_sgl) * ioc->mfi_sge_count) +
3534 ioc->mfi_sgl_off;
3535 if (ioc->mfi_sge_count > 0) {
3536 ccb->ccb_sgl = (union mfi_sgl *)
3537 &ccb->ccb_frame->mfr_bytes[ioc->mfi_sgl_off];
3538 }
3539 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_READ)
3540 ccb->ccb_direction = MFI_DATA_IN;
3541 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_WRITE)
3542 ccb->ccb_direction = MFI_DATA_OUT;
3543 ccb->ccb_len = ccb->ccb_frame->mfr_header.mfh_data_len;
3544 if (ccb->ccb_len > MAXPHYS) {
3545 error = ENOMEM;
3546 goto out;
3547 }
3548 if (ccb->ccb_len &&
3549 (ccb->ccb_direction & (MFI_DATA_IN | MFI_DATA_OUT)) != 0) {
3550 udata = malloc(ccb->ccb_len, M_DEVBUF, M_WAITOK|M_ZERO);
3551 if (udata == NULL) {
3552 error = ENOMEM;
3553 goto out;
3554 }
3555 ccb->ccb_data = udata;
3556 if (ccb->ccb_direction & MFI_DATA_OUT) {
3557 for (i = 0; i < ioc->mfi_sge_count; i++) {
3558 error = copyin(ioc->mfi_sgl[i].iov_base,
3559 udata, ioc->mfi_sgl[i].iov_len);
3560 if (error)
3561 goto out;
3562 udata = &udata[
3563 ioc->mfi_sgl[i].iov_len];
3564 }
3565 }
3566 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
3567 error = EIO;
3568 goto out;
3569 }
3570 }
3571 if (ccb->ccb_frame->mfr_header.mfh_cmd == MFI_CMD_PD_SCSI_IO) {
3572 ccb->ccb_frame->mfr_io.mif_sense_addr_lo =
3573 htole32(ccb->ccb_psense);
3574 ccb->ccb_frame->mfr_io.mif_sense_addr_hi = 0;
3575 }
3576 ccb->ccb_done = mfi_mgmt_done;
3577 mfi_post(sc, ccb);
3578 while (ccb->ccb_state != MFI_CCB_DONE)
3579 tsleep(ccb, PRIBIO, "mfi_fioc", 0);
3580
3581 if (ccb->ccb_direction & MFI_DATA_IN) {
3582 udata = ccb->ccb_data;
3583 for (i = 0; i < ioc->mfi_sge_count; i++) {
3584 error = copyout(udata,
3585 ioc->mfi_sgl[i].iov_base,
3586 ioc->mfi_sgl[i].iov_len);
3587 if (error)
3588 goto out;
3589 udata = &udata[
3590 ioc->mfi_sgl[i].iov_len];
3591 }
3592 }
3593 if (ioc->mfi_sense_len) {
3594 memcpy(&sense_ptr.sense_ptr_data[0],
3595 &ioc->mfi_frame.raw[ioc->mfi_sense_off],
3596 sizeof(sense_ptr.sense_ptr_data));
3597 error = copyout(ccb->ccb_sense,
3598 sense_ptr.user_space,
3599 sizeof(sense_ptr.sense_ptr_data));
3600 if (error)
3601 goto out;
3602 }
3603 memcpy(ioc->mfi_frame.raw, ccb->ccb_frame,
3604 sizeof(*ccb->ccb_frame));
3605 break;
3606 default:
3607 printf("mfifioctl unhandled cmd 0x%lx\n", cmd);
3608 return ENOTTY;
3609 }
3610
3611out:
3612 if (ccb->ccb_data)
3613 free(ccb->ccb_data, M_DEVBUF);
3614 if (ccb)
3615 mfi_put_ccb(ccb);
3616 splx(s);
3617 return error;
3618}
3619