1 | /* $NetBSD: twe.c,v 1.106 2016/09/27 03:33:32 pgoyette Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /*- |
33 | * Copyright (c) 2000 Michael Smith |
34 | * Copyright (c) 2000 BSDi |
35 | * All rights reserved. |
36 | * |
37 | * Redistribution and use in source and binary forms, with or without |
38 | * modification, are permitted provided that the following conditions |
39 | * are met: |
40 | * 1. Redistributions of source code must retain the above copyright |
41 | * notice, this list of conditions and the following disclaimer. |
42 | * 2. Redistributions in binary form must reproduce the above copyright |
43 | * notice, this list of conditions and the following disclaimer in the |
44 | * documentation and/or other materials provided with the distribution. |
45 | * |
46 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
47 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
48 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
49 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
50 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
51 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
52 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
53 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
54 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
55 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
56 | * SUCH DAMAGE. |
57 | * |
58 | * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp |
59 | */ |
60 | |
61 | /* |
62 | * Driver for the 3ware Escalade family of RAID controllers. |
63 | */ |
64 | |
65 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.106 2016/09/27 03:33:32 pgoyette Exp $");
67 | |
68 | #include <sys/param.h> |
69 | #include <sys/systm.h> |
70 | #include <sys/kernel.h> |
71 | #include <sys/device.h> |
72 | #include <sys/queue.h> |
73 | #include <sys/proc.h> |
74 | #include <sys/buf.h> |
75 | #include <sys/endian.h> |
76 | #include <sys/malloc.h> |
77 | #include <sys/conf.h> |
78 | #include <sys/disk.h> |
79 | #include <sys/sysctl.h> |
80 | #include <sys/syslog.h> |
81 | #include <sys/kauth.h> |
82 | #include <sys/module.h> |
83 | #include <sys/bswap.h> |
84 | #include <sys/bus.h> |
85 | |
86 | #include <dev/pci/pcireg.h> |
87 | #include <dev/pci/pcivar.h> |
88 | #include <dev/pci/pcidevs.h> |
89 | #include <dev/pci/twereg.h> |
90 | #include <dev/pci/twevar.h> |
91 | #include <dev/pci/tweio.h> |
92 | |
93 | #include "locators.h" |
94 | #include "ioconf.h" |
95 | |
#define PCI_CBIO	0x10	/* PCI BAR 0: controller registers (I/O space) */
97 | |
98 | static int twe_aen_get(struct twe_softc *, uint16_t *); |
99 | static void twe_aen_handler(struct twe_ccb *, int); |
100 | static void twe_aen_enqueue(struct twe_softc *sc, uint16_t, int); |
101 | static uint16_t twe_aen_dequeue(struct twe_softc *); |
102 | |
103 | static void twe_attach(device_t, device_t, void *); |
104 | static int twe_rescan(device_t, const char *, const int *); |
105 | static int twe_init_connection(struct twe_softc *); |
106 | static int twe_intr(void *); |
107 | static int twe_match(device_t, cfdata_t, void *); |
108 | static int twe_param_set(struct twe_softc *, int, int, size_t, void *); |
109 | static void twe_poll(struct twe_softc *); |
110 | static int twe_print(void *, const char *); |
111 | static int twe_reset(struct twe_softc *); |
112 | static int twe_status_check(struct twe_softc *, u_int); |
113 | static int twe_status_wait(struct twe_softc *, u_int, int); |
114 | static void twe_describe_controller(struct twe_softc *); |
115 | static void twe_clear_pci_abort(struct twe_softc *sc); |
116 | static void twe_clear_pci_parity_error(struct twe_softc *sc); |
117 | |
118 | static int twe_add_unit(struct twe_softc *, int); |
119 | static int twe_del_unit(struct twe_softc *, int); |
120 | static int twe_init_connection(struct twe_softc *); |
121 | |
122 | static inline u_int32_t twe_inl(struct twe_softc *, int); |
123 | static inline void twe_outl(struct twe_softc *, int, u_int32_t); |
124 | |
125 | extern struct cfdriver twe_cd; |
126 | |
127 | CFATTACH_DECL3_NEW(twe, sizeof(struct twe_softc), |
128 | twe_match, twe_attach, NULL, NULL, twe_rescan, NULL, 0); |
129 | |
130 | /* FreeBSD driver revision for sysctl expected by the 3ware cli */ |
const char twever[] = "1.50.01.002";
132 | |
133 | /* |
134 | * Tables to convert numeric codes to strings. |
135 | */ |
136 | const struct twe_code_table twe_table_status[] = { |
137 | { 0x00, "successful completion" }, |
138 | |
139 | /* info */ |
140 | { 0x42, "command in progress" }, |
141 | { 0x6c, "retrying interface CRC error from UDMA command" }, |
142 | |
143 | /* warning */ |
144 | { 0x81, "redundant/inconsequential request ignored" }, |
145 | { 0x8e, "failed to write zeroes to LBA 0" }, |
146 | { 0x8f, "failed to profile TwinStor zones" }, |
147 | |
148 | /* fatal */ |
149 | { 0xc1, "aborted due to system command or reconfiguration" }, |
150 | { 0xc4, "aborted" }, |
151 | { 0xc5, "access error" }, |
152 | { 0xc6, "access violation" }, |
153 | { 0xc7, "device failure" }, /* high byte may be port # */ |
154 | { 0xc8, "controller error" }, |
155 | { 0xc9, "timed out" }, |
156 | { 0xcb, "invalid unit number" }, |
157 | { 0xcf, "unit not available" }, |
158 | { 0xd2, "undefined opcode" }, |
159 | { 0xdb, "request incompatible with unit" }, |
160 | { 0xdc, "invalid request" }, |
161 | { 0xff, "firmware error, reset requested" }, |
162 | |
163 | { 0, NULL } |
164 | }; |
165 | |
166 | const struct twe_code_table twe_table_unitstate[] = { |
167 | { TWE_PARAM_UNITSTATUS_Normal, "Normal" }, |
168 | { TWE_PARAM_UNITSTATUS_Initialising, "Initializing" }, |
169 | { TWE_PARAM_UNITSTATUS_Degraded, "Degraded" }, |
170 | { TWE_PARAM_UNITSTATUS_Rebuilding, "Rebuilding" }, |
171 | { TWE_PARAM_UNITSTATUS_Verifying, "Verifying" }, |
172 | { TWE_PARAM_UNITSTATUS_Corrupt, "Corrupt" }, |
173 | { TWE_PARAM_UNITSTATUS_Missing, "Missing" }, |
174 | |
175 | { 0, NULL } |
176 | }; |
177 | |
178 | const struct twe_code_table twe_table_unittype[] = { |
179 | /* array descriptor configuration */ |
180 | { TWE_AD_CONFIG_RAID0, "RAID0" }, |
181 | { TWE_AD_CONFIG_RAID1, "RAID1" }, |
182 | { TWE_AD_CONFIG_TwinStor, "TwinStor" }, |
183 | { TWE_AD_CONFIG_RAID5, "RAID5" }, |
184 | { TWE_AD_CONFIG_RAID10, "RAID10" }, |
185 | { TWE_UD_CONFIG_JBOD, "JBOD" }, |
186 | |
187 | { 0, NULL } |
188 | }; |
189 | |
190 | const struct twe_code_table twe_table_stripedepth[] = { |
191 | { TWE_AD_STRIPE_4k, "4K" }, |
192 | { TWE_AD_STRIPE_8k, "8K" }, |
193 | { TWE_AD_STRIPE_16k, "16K" }, |
194 | { TWE_AD_STRIPE_32k, "32K" }, |
195 | { TWE_AD_STRIPE_64k, "64K" }, |
196 | { TWE_AD_STRIPE_128k, "128K" }, |
197 | { TWE_AD_STRIPE_256k, "256K" }, |
198 | { TWE_AD_STRIPE_512k, "512K" }, |
199 | { TWE_AD_STRIPE_1024k, "1024K" }, |
200 | |
201 | { 0, NULL } |
202 | }; |
203 | |
204 | /* |
205 | * Asynchronous event notification messages are qualified: |
206 | * a - not unit/port specific |
207 | * u - unit specific |
208 | * p - port specific |
209 | * |
210 | * They are further qualified with a severity: |
211 | * E - LOG_EMERG |
212 | * a - LOG_ALERT |
213 | * c - LOG_CRIT |
214 | * e - LOG_ERR |
215 | * w - LOG_WARNING |
216 | * n - LOG_NOTICE |
217 | * i - LOG_INFO |
218 | * d - LOG_DEBUG |
219 | * blank - just use printf |
220 | */ |
221 | const struct twe_code_table twe_table_aen[] = { |
	{ 0x00, "a  queue empty" },
	{ 0x01, "a  soft reset" },
224 | { 0x02, "uc degraded mode" }, |
225 | { 0x03, "aa controller error" }, |
226 | { 0x04, "uE rebuild fail" }, |
227 | { 0x05, "un rebuild done" }, |
228 | { 0x06, "ue incomplete unit" }, |
229 | { 0x07, "un initialization done" }, |
230 | { 0x08, "uw unclean shutdown detected" }, |
231 | { 0x09, "pe drive timeout" }, |
232 | { 0x0a, "pc drive error" }, |
233 | { 0x0b, "un rebuild started" }, |
234 | { 0x0c, "un initialization started" }, |
235 | { 0x0d, "ui logical unit deleted" }, |
236 | { 0x0f, "pc SMART threshold exceeded" }, |
	{ 0x15, "a  table undefined" },	/* XXX: Not in FreeBSD's table */
238 | { 0x21, "pe ATA UDMA downgrade" }, |
239 | { 0x22, "pi ATA UDMA upgrade" }, |
240 | { 0x23, "pw sector repair occurred" }, |
241 | { 0x24, "aa SBUF integrity check failure" }, |
242 | { 0x25, "pa lost cached write" }, |
243 | { 0x26, "pa drive ECC error detected" }, |
244 | { 0x27, "pe DCB checksum error" }, |
245 | { 0x28, "pn DCB unsupported version" }, |
246 | { 0x29, "ui verify started" }, |
247 | { 0x2a, "ua verify failed" }, |
248 | { 0x2b, "ui verify complete" }, |
249 | { 0x2c, "pw overwrote bad sector during rebuild" }, |
250 | { 0x2d, "pa encountered bad sector during rebuild" }, |
251 | { 0x2e, "pe replacement drive too small" }, |
252 | { 0x2f, "ue array not previously initialized" }, |
	{ 0x30, "p  drive not supported" },
	{ 0xff, "a  aen queue full" },
255 | |
256 | { 0, NULL }, |
257 | }; |
258 | |
259 | const char * |
260 | twe_describe_code(const struct twe_code_table *table, uint32_t code) |
261 | { |
262 | |
263 | for (; table->string != NULL; table++) { |
264 | if (table->code == code) |
265 | return (table->string); |
266 | } |
267 | return (NULL); |
268 | } |
269 | |
270 | static inline u_int32_t |
271 | twe_inl(struct twe_softc *sc, int off) |
272 | { |
273 | |
274 | bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, |
275 | BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); |
276 | return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off)); |
277 | } |
278 | |
279 | static inline void |
280 | twe_outl(struct twe_softc *sc, int off, u_int32_t val) |
281 | { |
282 | |
283 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); |
284 | bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, |
285 | BUS_SPACE_BARRIER_WRITE); |
286 | } |
287 | |
288 | /* |
289 | * Match a supported board. |
290 | */ |
291 | static int |
292 | twe_match(device_t parent, cfdata_t cfdata, void *aux) |
293 | { |
294 | struct pci_attach_args *pa; |
295 | |
296 | pa = aux; |
297 | |
298 | return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE && |
299 | (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE || |
300 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC)); |
301 | } |
302 | |
303 | /* |
304 | * Attach a supported board. |
305 | * |
306 | * XXX This doesn't fail gracefully. |
307 | */ |
308 | static void |
309 | twe_attach(device_t parent, device_t self, void *aux) |
310 | { |
311 | struct pci_attach_args *pa; |
312 | struct twe_softc *sc; |
313 | pci_chipset_tag_t pc; |
314 | pci_intr_handle_t ih; |
315 | pcireg_t csr; |
316 | const char *intrstr; |
317 | int s, size, i, rv, rseg; |
318 | size_t max_segs, max_xfer; |
319 | bus_dma_segment_t seg; |
320 | const struct sysctlnode *node; |
321 | struct twe_cmd *tc; |
322 | struct twe_ccb *ccb; |
323 | char intrbuf[PCI_INTRSTR_LEN]; |
324 | |
325 | sc = device_private(self); |
326 | sc->sc_dev = self; |
327 | pa = aux; |
328 | pc = pa->pa_pc; |
329 | sc->sc_dmat = pa->pa_dmat; |
330 | SIMPLEQ_INIT(&sc->sc_ccb_queue); |
331 | SLIST_INIT(&sc->sc_ccb_freelist); |
332 | |
	aprint_naive(": RAID controller\n");
	aprint_normal(": 3ware Escalade\n");
335 | |
336 | |
337 | if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0, |
338 | &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) { |
		aprint_error_dev(self, "can't map i/o space\n");
340 | return; |
341 | } |
342 | |
343 | /* Enable the device. */ |
344 | csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
345 | pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, |
346 | csr | PCI_COMMAND_MASTER_ENABLE); |
347 | |
348 | /* Map and establish the interrupt. */ |
349 | if (pci_intr_map(pa, &ih)) { |
		aprint_error_dev(self, "can't map interrupt\n");
351 | return; |
352 | } |
353 | |
354 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
355 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc); |
356 | if (sc->sc_ih == NULL) { |
		aprint_error_dev(self, "can't establish interrupt%s%s\n",
		    (intrstr) ? " at " : "",
		    (intrstr) ? intrstr : "");
360 | return; |
361 | } |
362 | |
363 | if (intrstr != NULL) |
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);
365 | |
366 | /* |
367 | * Allocate and initialise the command blocks and CCBs. |
368 | */ |
369 | size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT; |
370 | |
371 | if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1, |
372 | &rseg, BUS_DMA_NOWAIT)) != 0) { |
373 | aprint_error_dev(self, |
		    "unable to allocate commands, rv = %d\n", rv);
375 | return; |
376 | } |
377 | |
378 | if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, |
379 | (void **)&sc->sc_cmds, |
380 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { |
381 | aprint_error_dev(self, |
		    "unable to map commands, rv = %d\n", rv);
383 | return; |
384 | } |
385 | |
386 | if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0, |
387 | BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { |
388 | aprint_error_dev(self, |
		    "unable to create command DMA map, rv = %d\n", rv);
390 | return; |
391 | } |
392 | |
393 | if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds, |
394 | size, NULL, BUS_DMA_NOWAIT)) != 0) { |
395 | aprint_error_dev(self, |
		    "unable to load command DMA map, rv = %d\n", rv);
397 | return; |
398 | } |
399 | |
400 | ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT); |
401 | if (ccb == NULL) { |
		aprint_error_dev(self, "unable to allocate memory for ccbs\n");
403 | return; |
404 | } |
405 | |
406 | sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr; |
407 | memset(sc->sc_cmds, 0, size); |
408 | |
409 | sc->sc_ccbs = ccb; |
410 | tc = (struct twe_cmd *)sc->sc_cmds; |
411 | max_segs = twe_get_maxsegs(); |
412 | max_xfer = twe_get_maxxfer(max_segs); |
413 | |
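	/*
	 * Give each CCB its own transfer DMA map, sized for up to max_segs
	 * segments and max_xfer bytes per transfer.
	 */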
414 | for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) { |
415 | ccb->ccb_cmd = tc; |
416 | ccb->ccb_cmdid = i; |
417 | ccb->ccb_flags = 0; |
418 | rv = bus_dmamap_create(sc->sc_dmat, max_xfer, |
419 | max_segs, PAGE_SIZE, 0, |
420 | BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, |
421 | &ccb->ccb_dmamap_xfer); |
422 | if (rv != 0) { |
423 | aprint_error_dev(self, |
			    "can't create dmamap, rv = %d\n", rv);
425 | return; |
426 | } |
427 | |
428 | /* Save the first CCB for AEN retrieval. */ |
429 | if (i != 0) |
430 | SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, |
431 | ccb_chain.slist); |
432 | } |
433 | |
434 | /* Wait for the controller to become ready. */ |
435 | if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) { |
		aprint_error_dev(self, "microcontroller not ready\n");
437 | return; |
438 | } |
439 | |
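	/* Keep controller interrupts disabled until setup has completed. */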
440 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS); |
441 | |
442 | /* Reset the controller. */ |
443 | s = splbio(); |
444 | rv = twe_reset(sc); |
445 | splx(s); |
446 | if (rv) { |
		aprint_error_dev(self, "reset failed\n");
448 | return; |
449 | } |
450 | |
451 | /* Initialise connection with controller. */ |
452 | twe_init_connection(sc); |
453 | |
454 | twe_describe_controller(sc); |
455 | |
456 | /* Find and attach RAID array units. */ |
	twe_rescan(self, "twe", 0);
458 | |
459 | /* ...and finally, enable interrupts. */ |
460 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR | |
461 | TWE_CTL_UNMASK_RESP_INTR | |
462 | TWE_CTL_ENABLE_INTRS); |
463 | |
464 | /* sysctl set-up for 3ware cli */ |
465 | if (sysctl_createv(NULL, 0, NULL, &node, |
466 | 0, CTLTYPE_NODE, device_xname(self), |
	    SYSCTL_DESCR("twe driver information"),
468 | NULL, 0, NULL, 0, |
469 | CTL_HW, CTL_CREATE, CTL_EOL) != 0) { |
		aprint_error_dev(self, "could not create %s.%s sysctl node\n",
		    "hw", device_xname(self));
472 | return; |
473 | } |
474 | if ((i = sysctl_createv(NULL, 0, NULL, NULL, |
	    0, CTLTYPE_STRING, "driver_version",
	    SYSCTL_DESCR("twe0 driver version"),
477 | NULL, 0, __UNCONST(&twever), 0, |
478 | CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL)) |
479 | != 0) { |
480 | aprint_error_dev(self, |
		    "could not create %s.%s.driver_version sysctl\n",
		    "hw", device_xname(self));
483 | return; |
484 | } |
485 | } |
486 | |
487 | static int |
488 | twe_rescan(device_t self, const char *attr, const int *flags) |
489 | { |
490 | struct twe_softc *sc; |
491 | int i; |
492 | |
493 | sc = device_private(self); |
494 | sc->sc_nunits = 0; |
495 | for (i = 0; i < TWE_MAX_UNITS; i++) |
496 | (void) twe_add_unit(sc, i); |
497 | return 0; |
498 | } |
499 | |
500 | |
501 | void |
502 | twe_register_callbacks(struct twe_softc *sc, int unit, |
503 | const struct twe_callbacks *tcb) |
504 | { |
505 | |
506 | sc->sc_units[unit].td_callbacks = tcb; |
507 | } |
508 | |
509 | static void |
510 | twe_recompute_openings(struct twe_softc *sc) |
511 | { |
512 | struct twe_drive *td; |
513 | int unit, openings; |
514 | |
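	/*
	 * Share the command slots among the attached units; one CCB (the
	 * first) stays reserved for AEN retrieval, hence the "- 1".
	 */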
515 | if (sc->sc_nunits != 0) |
516 | openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits; |
517 | else |
518 | openings = 0; |
519 | if (openings == sc->sc_openings) |
520 | return; |
521 | sc->sc_openings = openings; |
522 | |
523 | #ifdef TWE_DEBUG |
	printf("%s: %d array%s, %d openings per array\n",
	    device_xname(sc->sc_dev), sc->sc_nunits,
	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
527 | #endif |
528 | |
529 | for (unit = 0; unit < TWE_MAX_UNITS; unit++) { |
530 | td = &sc->sc_units[unit]; |
531 | if (td->td_dev != NULL) |
532 | (*td->td_callbacks->tcb_openings)(td->td_dev, |
533 | sc->sc_openings); |
534 | } |
535 | } |
536 | |
537 | static int |
538 | twe_add_unit(struct twe_softc *sc, int unit) |
539 | { |
540 | struct twe_param *dtp, *atp; |
541 | struct twe_array_descriptor *ad; |
542 | struct twe_drive *td; |
543 | struct twe_attach_args twea; |
544 | uint32_t newsize; |
545 | int rv; |
546 | uint16_t dsize; |
547 | uint8_t newtype, newstripe; |
548 | int locs[TWECF_NLOCS]; |
549 | |
550 | if (unit < 0 || unit >= TWE_MAX_UNITS) |
551 | return (EINVAL); |
552 | |
553 | /* Find attached units. */ |
554 | rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY, |
555 | TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp); |
556 | if (rv != 0) { |
557 | aprint_error_dev(sc->sc_dev, |
		    "error %d fetching unit summary\n", rv);
559 | return (rv); |
560 | } |
561 | |
562 | /* For each detected unit, collect size and store in an array. */ |
563 | td = &sc->sc_units[unit]; |
564 | |
565 | /* Unit present? */ |
566 | if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) { |
567 | /* |
568 | * XXX Should we check to see if a device has been |
569 | * XXX attached at this index and detach it if it |
570 | * XXX has? ("rescan" semantics) |
571 | */ |
572 | rv = 0; |
573 | goto out; |
574 | } |
575 | |
576 | rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit, |
577 | TWE_PARAM_UNITINFO_DescriptorSize, &dsize); |
578 | if (rv != 0) { |
579 | aprint_error_dev(sc->sc_dev, |
		    "error %d fetching descriptor size for unit %d\n",
581 | rv, unit); |
582 | goto out; |
583 | } |
584 | |
585 | rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit, |
586 | TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp); |
587 | if (rv != 0) { |
588 | aprint_error_dev(sc->sc_dev, |
		    "error %d fetching array descriptor for unit %d\n",
590 | rv, unit); |
591 | goto out; |
592 | } |
593 | |
594 | ad = (struct twe_array_descriptor *)atp->tp_data; |
595 | newtype = ad->configuration; |
596 | newstripe = ad->stripe_size; |
597 | free(atp, M_DEVBUF); |
598 | |
599 | rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit, |
600 | TWE_PARAM_UNITINFO_Capacity, &newsize); |
601 | if (rv != 0) { |
602 | aprint_error_dev(sc->sc_dev, |
		    "error %d fetching capacity for unit %d\n",
604 | rv, unit); |
605 | goto out; |
606 | } |
607 | |
608 | /* |
609 | * Have a device, so we need to attach it. If there is currently |
610 | * something sitting at the slot, and the parameters are different, |
611 | * then we detach the old device before attaching the new one. |
612 | */ |
613 | if (td->td_dev != NULL && |
614 | td->td_size == newsize && |
615 | td->td_type == newtype && |
616 | td->td_stripe == newstripe) { |
617 | /* Same as the old device; just keep using it. */ |
618 | rv = 0; |
619 | goto out; |
620 | } else if (td->td_dev != NULL) { |
621 | /* Detach the old device first. */ |
622 | (void) config_detach(td->td_dev, DETACH_FORCE); |
623 | td->td_dev = NULL; |
624 | } else if (td->td_size == 0) |
625 | sc->sc_nunits++; |
626 | |
627 | /* |
628 | * Committed to the new array unit; assign its parameters and |
629 | * recompute the number of available command openings. |
630 | */ |
631 | td->td_size = newsize; |
632 | td->td_type = newtype; |
633 | td->td_stripe = newstripe; |
634 | twe_recompute_openings(sc); |
635 | |
636 | twea.twea_unit = unit; |
637 | |
638 | locs[TWECF_UNIT] = unit; |
639 | |
	td->td_dev = config_found_sm_loc(sc->sc_dev, "twe", locs, &twea,
641 | twe_print, config_stdsubmatch); |
642 | |
643 | rv = 0; |
644 | out: |
645 | free(dtp, M_DEVBUF); |
646 | return (rv); |
647 | } |
648 | |
649 | static int |
650 | twe_del_unit(struct twe_softc *sc, int unit) |
651 | { |
652 | struct twe_drive *td; |
653 | |
654 | if (unit < 0 || unit >= TWE_MAX_UNITS) |
655 | return (EINVAL); |
656 | |
657 | td = &sc->sc_units[unit]; |
658 | if (td->td_size != 0) |
659 | sc->sc_nunits--; |
660 | td->td_size = 0; |
661 | td->td_type = 0; |
662 | td->td_stripe = 0; |
663 | if (td->td_dev != NULL) { |
664 | (void) config_detach(td->td_dev, DETACH_FORCE); |
665 | td->td_dev = NULL; |
666 | } |
667 | twe_recompute_openings(sc); |
668 | return (0); |
669 | } |
670 | |
671 | /* |
672 | * Reset the controller. |
673 | * MUST BE CALLED AT splbio()! |
674 | */ |
675 | static int |
676 | twe_reset(struct twe_softc *sc) |
677 | { |
678 | uint16_t aen; |
679 | u_int status; |
680 | int got, rv; |
681 | |
682 | /* Issue a soft reset. */ |
683 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET | |
684 | TWE_CTL_CLEAR_HOST_INTR | |
685 | TWE_CTL_CLEAR_ATTN_INTR | |
686 | TWE_CTL_MASK_CMD_INTR | |
687 | TWE_CTL_MASK_RESP_INTR | |
688 | TWE_CTL_CLEAR_ERROR_STS | |
689 | TWE_CTL_DISABLE_INTRS); |
690 | |
691 | /* Wait for attention... */ |
692 | if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) { |
693 | aprint_error_dev(sc->sc_dev, |
		    "timeout waiting for attention interrupt\n");
695 | return (-1); |
696 | } |
697 | |
698 | /* ...and ACK it. */ |
699 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR); |
700 | |
701 | /* |
702 | * Pull AENs out of the controller; look for a soft reset AEN. |
703 | * Open code this, since we want to detect reset even if the |
704 | * queue for management tools is full. |
705 | * |
706 | * Note that since: |
707 | * - interrupts are blocked |
708 | * - we have reset the controller |
709 | * - acknowledged the pending ATTENTION |
710 | * that there is no way a pending asynchronous AEN fetch would |
711 | * finish, so clear the flag. |
712 | */ |
713 | sc->sc_flags &= ~TWEF_AEN; |
714 | for (got = 0;;) { |
715 | rv = twe_aen_get(sc, &aen); |
716 | if (rv != 0) |
			printf("%s: error %d while draining event queue\n",
718 | device_xname(sc->sc_dev), rv); |
719 | if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) |
720 | break; |
721 | if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET) |
722 | got = 1; |
723 | twe_aen_enqueue(sc, aen, 1); |
724 | } |
725 | |
726 | if (!got) { |
		printf("%s: reset not reported\n", device_xname(sc->sc_dev));
728 | return (-1); |
729 | } |
730 | |
731 | /* Check controller status. */ |
732 | status = twe_inl(sc, TWE_REG_STS); |
733 | if (twe_status_check(sc, status)) { |
		printf("%s: controller errors detected\n",
735 | device_xname(sc->sc_dev)); |
736 | return (-1); |
737 | } |
738 | |
739 | /* Drain the response queue. */ |
740 | for (;;) { |
741 | status = twe_inl(sc, TWE_REG_STS); |
742 | if (twe_status_check(sc, status) != 0) { |
743 | aprint_error_dev(sc->sc_dev, |
			    "can't drain response queue\n");
745 | return (-1); |
746 | } |
747 | if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0) |
748 | break; |
749 | (void)twe_inl(sc, TWE_REG_RESP_QUEUE); |
750 | } |
751 | |
752 | return (0); |
753 | } |
754 | |
755 | /* |
756 | * Print autoconfiguration message for a sub-device. |
757 | */ |
758 | static int |
759 | twe_print(void *aux, const char *pnp) |
760 | { |
761 | struct twe_attach_args *twea; |
762 | |
763 | twea = aux; |
764 | |
765 | if (pnp != NULL) |
766 | aprint_normal("block device at %s" , pnp); |
767 | aprint_normal(" unit %d" , twea->twea_unit); |
768 | return (UNCONF); |
769 | } |
770 | |
771 | /* |
772 | * Interrupt service routine. |
773 | */ |
774 | static int |
775 | twe_intr(void *arg) |
776 | { |
777 | struct twe_softc *sc; |
778 | u_int status; |
779 | int caught, rv; |
780 | |
781 | sc = arg; |
782 | caught = 0; |
783 | status = twe_inl(sc, TWE_REG_STS); |
784 | twe_status_check(sc, status); |
785 | |
786 | /* Host interrupts - purpose unknown. */ |
787 | if ((status & TWE_STS_HOST_INTR) != 0) { |
788 | #ifdef DEBUG |
		printf("%s: host interrupt\n", device_xname(sc->sc_dev));
790 | #endif |
791 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR); |
792 | caught = 1; |
793 | } |
794 | |
795 | /* |
796 | * Attention interrupts, signalled when a controller or child device |
797 | * state change has occurred. |
798 | */ |
799 | if ((status & TWE_STS_ATTN_INTR) != 0) { |
800 | rv = twe_aen_get(sc, NULL); |
801 | if (rv != 0) |
802 | aprint_error_dev(sc->sc_dev, |
			    "unable to retrieve AEN (%d)\n", rv);
804 | else |
805 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR); |
806 | caught = 1; |
807 | } |
808 | |
809 | /* |
810 | * Command interrupts, signalled when the controller can accept more |
811 | * commands. We don't use this; instead, we try to submit commands |
812 | * when we receive them, and when other commands have completed. |
813 | * Mask it so we don't get another one. |
814 | */ |
815 | if ((status & TWE_STS_CMD_INTR) != 0) { |
816 | #ifdef DEBUG |
		printf("%s: command interrupt\n", device_xname(sc->sc_dev));
818 | #endif |
819 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR); |
820 | caught = 1; |
821 | } |
822 | |
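	/*
	 * Response interrupts, signalled when one or more commands have
	 * completed; drain the response queue.
	 */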
823 | if ((status & TWE_STS_RESP_INTR) != 0) { |
824 | twe_poll(sc); |
825 | caught = 1; |
826 | } |
827 | |
828 | return (caught); |
829 | } |
830 | |
831 | /* |
832 | * Fetch an AEN. Even though this is really like parameter |
833 | * retrieval, we handle this specially, because we issue this |
834 | * AEN retrieval command from interrupt context, and thus |
835 | * reserve a CCB for it to avoid resource shortage. |
836 | * |
837 | * XXX There are still potential resource shortages we could |
838 | * XXX encounter. Consider pre-allocating all AEN-related |
839 | * XXX resources. |
840 | * |
841 | * MUST BE CALLED AT splbio()! |
842 | */ |
843 | static int |
844 | twe_aen_get(struct twe_softc *sc, uint16_t *aenp) |
845 | { |
846 | struct twe_ccb *ccb; |
847 | struct twe_cmd *tc; |
848 | struct twe_param *tp; |
849 | int rv; |
850 | |
851 | /* |
852 | * If we're already retrieving an AEN, just wait; another |
853 | * retrieval will be chained after the current one completes. |
854 | */ |
855 | if (sc->sc_flags & TWEF_AEN) { |
856 | /* |
857 | * It is a fatal software programming error to attempt |
858 | * to fetch an AEN synchronously when an AEN fetch is |
859 | * already pending. |
860 | */ |
861 | KASSERT(aenp == NULL); |
862 | return (0); |
863 | } |
864 | |
865 | tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT); |
866 | if (tp == NULL) |
867 | return (ENOMEM); |
868 | |
869 | ccb = twe_ccb_alloc(sc, |
870 | TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT); |
871 | KASSERT(ccb != NULL); |
872 | |
873 | ccb->ccb_data = tp; |
874 | ccb->ccb_datasize = TWE_SECTOR_SIZE; |
875 | ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL; |
876 | ccb->ccb_tx.tx_context = tp; |
877 | ccb->ccb_tx.tx_dv = sc->sc_dev; |
878 | |
879 | tc = ccb->ccb_cmd; |
880 | tc->tc_size = 2; |
881 | tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5); |
882 | tc->tc_unit = 0; |
883 | tc->tc_count = htole16(1); |
884 | |
885 | /* Fill in the outbound parameter data. */ |
886 | tp->tp_table_id = htole16(TWE_PARAM_AEN); |
887 | tp->tp_param_id = TWE_PARAM_AEN_UnitCode; |
888 | tp->tp_param_size = 2; |
889 | |
890 | /* Map the transfer. */ |
891 | if ((rv = twe_ccb_map(sc, ccb)) != 0) { |
892 | twe_ccb_free(sc, ccb); |
893 | goto done; |
894 | } |
895 | |
896 | /* Enqueue the command and wait. */ |
897 | if (aenp != NULL) { |
898 | rv = twe_ccb_poll(sc, ccb, 5); |
899 | twe_ccb_unmap(sc, ccb); |
900 | twe_ccb_free(sc, ccb); |
901 | if (rv == 0) |
902 | *aenp = le16toh(*(uint16_t *)tp->tp_data); |
903 | free(tp, M_DEVBUF); |
904 | } else { |
905 | sc->sc_flags |= TWEF_AEN; |
906 | twe_ccb_enqueue(sc, ccb); |
907 | rv = 0; |
908 | } |
909 | |
910 | done: |
911 | return (rv); |
912 | } |
913 | |
914 | /* |
915 | * Handle an AEN returned by the controller. |
916 | * MUST BE CALLED AT splbio()! |
917 | */ |
918 | static void |
919 | twe_aen_handler(struct twe_ccb *ccb, int error) |
920 | { |
921 | struct twe_softc *sc; |
922 | struct twe_param *tp; |
923 | uint16_t aen; |
924 | int rv; |
925 | |
926 | sc = device_private(ccb->ccb_tx.tx_dv); |
927 | tp = ccb->ccb_tx.tx_context; |
928 | twe_ccb_unmap(sc, ccb); |
929 | |
930 | sc->sc_flags &= ~TWEF_AEN; |
931 | |
932 | if (error) { |
		aprint_error_dev(sc->sc_dev, "error retrieving AEN\n");
934 | aen = TWE_AEN_QUEUE_EMPTY; |
935 | } else |
936 | aen = le16toh(*(u_int16_t *)tp->tp_data); |
937 | free(tp, M_DEVBUF); |
938 | twe_ccb_free(sc, ccb); |
939 | |
940 | if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) { |
941 | twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR); |
942 | return; |
943 | } |
944 | |
945 | twe_aen_enqueue(sc, aen, 0); |
946 | |
947 | /* |
948 | * Chain another retrieval in case interrupts have been |
949 | * coalesced. |
950 | */ |
951 | rv = twe_aen_get(sc, NULL); |
952 | if (rv != 0) |
953 | aprint_error_dev(sc->sc_dev, |
		    "unable to retrieve AEN (%d)\n", rv);
955 | } |
956 | |
957 | static void |
958 | twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet) |
959 | { |
960 | const char *str, *msg; |
961 | int s, next, nextnext, level; |
962 | |
963 | /* |
964 | * First report the AEN on the console. Maybe. |
965 | */ |
966 | if (! quiet) { |
967 | str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen)); |
968 | if (str == NULL) { |
969 | aprint_error_dev(sc->sc_dev, |
			    "unknown AEN 0x%04x\n", aen);
971 | } else { |
972 | msg = str + 3; |
973 | switch (str[1]) { |
974 | case 'E': level = LOG_EMERG; break; |
975 | case 'a': level = LOG_ALERT; break; |
976 | case 'c': level = LOG_CRIT; break; |
977 | case 'e': level = LOG_ERR; break; |
978 | case 'w': level = LOG_WARNING; break; |
979 | case 'n': level = LOG_NOTICE; break; |
980 | case 'i': level = LOG_INFO; break; |
981 | case 'd': level = LOG_DEBUG; break; |
982 | default: |
983 | /* Don't use syslog. */ |
984 | level = -1; |
985 | } |
986 | |
987 | if (level < 0) { |
988 | switch (str[0]) { |
989 | case 'u': |
990 | case 'p': |
					printf("%s: %s %d: %s\n",
					    device_xname(sc->sc_dev),
					    str[0] == 'u' ? "unit" : "port",
994 | TWE_AEN_UNIT(aen), msg); |
995 | break; |
996 | |
997 | default: |
					printf("%s: %s\n",
999 | device_xname(sc->sc_dev), msg); |
1000 | } |
1001 | } else { |
1002 | switch (str[0]) { |
1003 | case 'u': |
1004 | case 'p': |
					log(level, "%s: %s %d: %s\n",
					    device_xname(sc->sc_dev),
					    str[0] == 'u' ? "unit" : "port",
1008 | TWE_AEN_UNIT(aen), msg); |
1009 | break; |
1010 | |
1011 | default: |
					log(level, "%s: %s\n",
1013 | device_xname(sc->sc_dev), msg); |
1014 | } |
1015 | } |
1016 | } |
1017 | } |
1018 | |
	/* Now enqueue the AEN for management tools. */
1020 | s = splbio(); |
1021 | |
1022 | next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH; |
1023 | nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH; |
1024 | |
1025 | /* |
1026 | * If this is the last free slot, then queue up a "queue |
1027 | * full" message. |
1028 | */ |
1029 | if (nextnext == sc->sc_aen_tail) |
1030 | aen = TWE_AEN_QUEUE_FULL; |
1031 | |
1032 | if (next != sc->sc_aen_tail) { |
1033 | sc->sc_aen_queue[sc->sc_aen_head] = aen; |
1034 | sc->sc_aen_head = next; |
1035 | } |
1036 | |
1037 | if (sc->sc_flags & TWEF_AENQ_WAIT) { |
1038 | sc->sc_flags &= ~TWEF_AENQ_WAIT; |
1039 | wakeup(&sc->sc_aen_queue); |
1040 | } |
1041 | |
1042 | splx(s); |
1043 | } |
1044 | |
1045 | /* NOTE: Must be called at splbio(). */ |
1046 | static uint16_t |
1047 | twe_aen_dequeue(struct twe_softc *sc) |
1048 | { |
1049 | uint16_t aen; |
1050 | |
1051 | if (sc->sc_aen_tail == sc->sc_aen_head) |
1052 | aen = TWE_AEN_QUEUE_EMPTY; |
1053 | else { |
1054 | aen = sc->sc_aen_queue[sc->sc_aen_tail]; |
1055 | sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH; |
1056 | } |
1057 | |
1058 | return (aen); |
1059 | } |
1060 | |
1061 | /* |
1062 | * These are short-hand functions that execute TWE_OP_GET_PARAM to |
1063 | * fetch 1, 2, and 4 byte parameter values, respectively. |
1064 | */ |
1065 | int |
1066 | twe_param_get_1(struct twe_softc *sc, int table_id, int param_id, |
1067 | uint8_t *valp) |
1068 | { |
1069 | struct twe_param *tp; |
1070 | int rv; |
1071 | |
1072 | rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp); |
1073 | if (rv != 0) |
1074 | return (rv); |
1075 | *valp = *(uint8_t *)tp->tp_data; |
1076 | free(tp, M_DEVBUF); |
1077 | return (0); |
1078 | } |
1079 | |
1080 | int |
1081 | twe_param_get_2(struct twe_softc *sc, int table_id, int param_id, |
1082 | uint16_t *valp) |
1083 | { |
1084 | struct twe_param *tp; |
1085 | int rv; |
1086 | |
1087 | rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp); |
1088 | if (rv != 0) |
1089 | return (rv); |
1090 | *valp = le16toh(*(uint16_t *)tp->tp_data); |
1091 | free(tp, M_DEVBUF); |
1092 | return (0); |
1093 | } |
1094 | |
1095 | int |
1096 | twe_param_get_4(struct twe_softc *sc, int table_id, int param_id, |
1097 | uint32_t *valp) |
1098 | { |
1099 | struct twe_param *tp; |
1100 | int rv; |
1101 | |
1102 | rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp); |
1103 | if (rv != 0) |
1104 | return (rv); |
1105 | *valp = le32toh(*(uint32_t *)tp->tp_data); |
1106 | free(tp, M_DEVBUF); |
1107 | return (0); |
1108 | } |
1109 | |
1110 | /* |
1111 | * Execute a TWE_OP_GET_PARAM command. If a callback function is provided, |
1112 | * it will be called with generated context when the command has completed. |
 * If no callback is provided, the command will be executed synchronously
 * and a pointer to a buffer containing the returned data will be stored
 * via pbuf.
1115 | * |
1116 | * The caller or callback is responsible for freeing the buffer. |
1117 | * |
1118 | * NOTE: We assume we can sleep here to wait for a CCB to become available. |
1119 | */ |
1120 | int |
1121 | twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size, |
1122 | void (*func)(struct twe_ccb *, int), struct twe_param **pbuf) |
1123 | { |
1124 | struct twe_ccb *ccb; |
1125 | struct twe_cmd *tc; |
1126 | struct twe_param *tp; |
1127 | int rv, s; |
1128 | |
1129 | tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT); |
1130 | if (tp == NULL) |
1131 | return ENOMEM; |
1132 | |
1133 | ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT); |
1134 | KASSERT(ccb != NULL); |
1135 | |
1136 | ccb->ccb_data = tp; |
1137 | ccb->ccb_datasize = TWE_SECTOR_SIZE; |
1138 | ccb->ccb_tx.tx_handler = func; |
1139 | ccb->ccb_tx.tx_context = tp; |
1140 | ccb->ccb_tx.tx_dv = sc->sc_dev; |
1141 | |
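	/*
	 * tc_size counts 32-bit words in the command packet; twe_ccb_map()
	 * later adds two words per S/G entry and uses the top three bits
	 * of tc_opcode to locate the S/G list within the command.
	 */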
1142 | tc = ccb->ccb_cmd; |
1143 | tc->tc_size = 2; |
1144 | tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5); |
1145 | tc->tc_unit = 0; |
1146 | tc->tc_count = htole16(1); |
1147 | |
1148 | /* Fill in the outbound parameter data. */ |
1149 | tp->tp_table_id = htole16(table_id); |
1150 | tp->tp_param_id = param_id; |
1151 | tp->tp_param_size = size; |
1152 | |
1153 | /* Map the transfer. */ |
1154 | if ((rv = twe_ccb_map(sc, ccb)) != 0) { |
1155 | twe_ccb_free(sc, ccb); |
1156 | goto done; |
1157 | } |
1158 | |
1159 | /* Submit the command and either wait or let the callback handle it. */ |
1160 | if (func == NULL) { |
1161 | s = splbio(); |
1162 | rv = twe_ccb_poll(sc, ccb, 5); |
1163 | twe_ccb_unmap(sc, ccb); |
1164 | twe_ccb_free(sc, ccb); |
1165 | splx(s); |
1166 | } else { |
1167 | #ifdef DEBUG |
1168 | if (pbuf != NULL) |
			panic("both func and pbuf defined");
1170 | #endif |
1171 | twe_ccb_enqueue(sc, ccb); |
1172 | return 0; |
1173 | } |
1174 | |
1175 | done: |
1176 | if (pbuf == NULL || rv != 0) |
1177 | free(tp, M_DEVBUF); |
1178 | else if (pbuf != NULL && rv == 0) |
1179 | *pbuf = tp; |
1180 | return rv; |
1181 | } |
1182 | |
1183 | /* |
1184 | * Execute a TWE_OP_SET_PARAM command. |
1185 | * |
1186 | * NOTE: We assume we can sleep here to wait for a CCB to become available. |
1187 | */ |
1188 | static int |
1189 | twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size, |
1190 | void *sbuf) |
1191 | { |
1192 | struct twe_ccb *ccb; |
1193 | struct twe_cmd *tc; |
1194 | struct twe_param *tp; |
1195 | int rv, s; |
1196 | |
1197 | tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT); |
1198 | if (tp == NULL) |
1199 | return ENOMEM; |
1200 | |
1201 | ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT); |
1202 | KASSERT(ccb != NULL); |
1203 | |
1204 | ccb->ccb_data = tp; |
1205 | ccb->ccb_datasize = TWE_SECTOR_SIZE; |
1206 | ccb->ccb_tx.tx_handler = 0; |
1207 | ccb->ccb_tx.tx_context = tp; |
1208 | ccb->ccb_tx.tx_dv = sc->sc_dev; |
1209 | |
1210 | tc = ccb->ccb_cmd; |
1211 | tc->tc_size = 2; |
1212 | tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5); |
1213 | tc->tc_unit = 0; |
1214 | tc->tc_count = htole16(1); |
1215 | |
1216 | /* Fill in the outbound parameter data. */ |
1217 | tp->tp_table_id = htole16(table_id); |
1218 | tp->tp_param_id = param_id; |
1219 | tp->tp_param_size = size; |
1220 | memcpy(tp->tp_data, sbuf, size); |
1221 | |
1222 | /* Map the transfer. */ |
1223 | if ((rv = twe_ccb_map(sc, ccb)) != 0) { |
1224 | twe_ccb_free(sc, ccb); |
1225 | goto done; |
1226 | } |
1227 | |
1228 | /* Submit the command and wait. */ |
1229 | s = splbio(); |
1230 | rv = twe_ccb_poll(sc, ccb, 5); |
1231 | twe_ccb_unmap(sc, ccb); |
1232 | twe_ccb_free(sc, ccb); |
1233 | splx(s); |
1234 | done: |
1235 | free(tp, M_DEVBUF); |
1236 | return (rv); |
1237 | } |
1238 | |
1239 | /* |
1240 | * Execute a TWE_OP_INIT_CONNECTION command. Return non-zero on error. |
1241 | * Must be called with interrupts blocked. |
1242 | */ |
1243 | static int |
1244 | twe_init_connection(struct twe_softc *sc) |
1245 | { |
1246 | struct twe_ccb *ccb; |
1247 | struct twe_cmd *tc; |
1248 | int rv; |
1249 | |
1250 | if ((ccb = twe_ccb_alloc(sc, 0)) == NULL) |
1251 | return (EAGAIN); |
1252 | |
1253 | /* Build the command. */ |
1254 | tc = ccb->ccb_cmd; |
1255 | tc->tc_size = 3; |
1256 | tc->tc_opcode = TWE_OP_INIT_CONNECTION; |
1257 | tc->tc_unit = 0; |
1258 | tc->tc_count = htole16(TWE_MAX_CMDS); |
1259 | tc->tc_args.init_connection.response_queue_pointer = 0; |
1260 | |
1261 | /* Submit the command for immediate execution. */ |
1262 | rv = twe_ccb_poll(sc, ccb, 5); |
1263 | twe_ccb_free(sc, ccb); |
1264 | return (rv); |
1265 | } |
1266 | |
1267 | /* |
1268 | * Poll the controller for completed commands. Must be called with |
1269 | * interrupts blocked. |
1270 | */ |
1271 | static void |
1272 | twe_poll(struct twe_softc *sc) |
1273 | { |
1274 | struct twe_ccb *ccb; |
1275 | int found; |
1276 | u_int status, cmdid; |
1277 | |
1278 | found = 0; |
1279 | |
1280 | for (;;) { |
1281 | status = twe_inl(sc, TWE_REG_STS); |
1282 | twe_status_check(sc, status); |
1283 | |
1284 | if ((status & TWE_STS_RESP_QUEUE_EMPTY)) |
1285 | break; |
1286 | |
1287 | found = 1; |
1288 | cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE); |
1289 | cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT; |
1290 | if (cmdid >= TWE_MAX_QUEUECNT) { |
			aprint_error_dev(sc->sc_dev, "bad cmdid %d\n", cmdid);
1292 | continue; |
1293 | } |
1294 | |
1295 | ccb = sc->sc_ccbs + cmdid; |
1296 | if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) { |
			printf("%s: CCB for cmdid %d not active\n",
1298 | device_xname(sc->sc_dev), cmdid); |
1299 | continue; |
1300 | } |
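		/* Clear ACTIVE (checked above) and mark the CCB COMPLETE. */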
1301 | ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE; |
1302 | |
1303 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1304 | (char *)ccb->ccb_cmd - (char *)sc->sc_cmds, |
1305 | sizeof(struct twe_cmd), |
1306 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1307 | |
1308 | /* Pass notification to upper layers. */ |
1309 | if (ccb->ccb_tx.tx_handler != NULL) |
1310 | (*ccb->ccb_tx.tx_handler)(ccb, |
1311 | ccb->ccb_cmd->tc_status != 0 ? EIO : 0); |
1312 | } |
1313 | |
1314 | /* If any commands have completed, run the software queue. */ |
1315 | if (found) |
1316 | twe_ccb_enqueue(sc, NULL); |
1317 | } |
1318 | |
1319 | /* |
1320 | * Wait for `status' to be set in the controller status register. Return |
1321 | * zero if found, non-zero if the operation timed out. |
1322 | */ |
1323 | static int |
1324 | twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo) |
1325 | { |
1326 | |
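	/* 'timo' is in seconds; poll the status register every 100ms. */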
1327 | for (timo *= 10; timo != 0; timo--) { |
1328 | if ((twe_inl(sc, TWE_REG_STS) & status) == status) |
1329 | break; |
1330 | delay(100000); |
1331 | } |
1332 | |
1333 | return (timo == 0); |
1334 | } |
1335 | |
1336 | /* |
1337 | * Clear a PCI parity error. |
1338 | */ |
1339 | static void |
1340 | twe_clear_pci_parity_error(struct twe_softc *sc) |
1341 | { |
1342 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, |
1343 | TWE_CTL_CLEAR_PARITY_ERROR); |
1344 | |
	/* FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2); */
1346 | } |
1347 | |
1348 | |
1349 | /* |
1350 | * Clear a PCI abort. |
1351 | */ |
1352 | static void |
1353 | twe_clear_pci_abort(struct twe_softc *sc) |
1354 | { |
1355 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT); |
1356 | |
	/* FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2); */
1358 | } |
1359 | |
1360 | /* |
1361 | * Complain if the status bits aren't what we expect. |
1362 | */ |
1363 | static int |
1364 | twe_status_check(struct twe_softc *sc, u_int status) |
1365 | { |
1366 | int rv; |
1367 | |
1368 | rv = 0; |
1369 | |
1370 | if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) { |
		aprint_error_dev(sc->sc_dev, "missing status bits: 0x%08x\n",
1372 | status & ~TWE_STS_EXPECTED_BITS); |
1373 | rv = -1; |
1374 | } |
1375 | |
1376 | if ((status & TWE_STS_UNEXPECTED_BITS) != 0) { |
		aprint_error_dev(sc->sc_dev, "unexpected status bits: 0x%08x\n",
1378 | status & TWE_STS_UNEXPECTED_BITS); |
1379 | rv = -1; |
1380 | if (status & TWE_STS_PCI_PARITY_ERROR) { |
			aprint_error_dev(sc->sc_dev, "PCI parity error: reseat"
			    " or move the card, or check for a buggy device.\n");
1383 | twe_clear_pci_parity_error(sc); |
1384 | } |
1385 | if (status & TWE_STS_PCI_ABORT) { |
			aprint_error_dev(sc->sc_dev, "PCI abort, clearing.\n");
1387 | twe_clear_pci_abort(sc); |
1388 | } |
1389 | } |
1390 | |
1391 | return (rv); |
1392 | } |
1393 | |
1394 | /* |
1395 | * Allocate and initialise a CCB. |
1396 | */ |
1397 | static inline void |
1398 | twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags) |
1399 | { |
1400 | struct twe_cmd *tc; |
1401 | |
1402 | ccb->ccb_tx.tx_handler = NULL; |
1403 | ccb->ccb_flags = flags; |
1404 | tc = ccb->ccb_cmd; |
1405 | tc->tc_status = 0; |
1406 | tc->tc_flags = 0; |
1407 | tc->tc_cmdid = ccb->ccb_cmdid; |
1408 | } |
1409 | |
1410 | struct twe_ccb * |
1411 | twe_ccb_alloc(struct twe_softc *sc, int flags) |
1412 | { |
1413 | struct twe_ccb *ccb; |
1414 | int s; |
1415 | |
1416 | s = splbio(); |
1417 | if (__predict_false((flags & TWE_CCB_AEN) != 0)) { |
1418 | /* Use the reserved CCB. */ |
1419 | ccb = sc->sc_ccbs; |
1420 | } else { |
1421 | /* Allocate a CCB and command block. */ |
1422 | if (__predict_false((ccb = |
1423 | SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) { |
1424 | splx(s); |
1425 | return (NULL); |
1426 | } |
1427 | SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist); |
1428 | } |
1429 | #ifdef DIAGNOSTIC |
1430 | if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0) |
		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
1432 | if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0) |
		panic("twe_ccb_alloc: CCB %ld already allocated",
1434 | (long)(ccb - sc->sc_ccbs)); |
1435 | flags |= TWE_CCB_ALLOCED; |
1436 | #endif |
1437 | splx(s); |
1438 | |
1439 | twe_ccb_init(sc, ccb, flags); |
1440 | return (ccb); |
1441 | } |
1442 | |
1443 | struct twe_ccb * |
1444 | twe_ccb_alloc_wait(struct twe_softc *sc, int flags) |
1445 | { |
1446 | struct twe_ccb *ccb; |
1447 | int s; |
1448 | |
1449 | KASSERT((flags & TWE_CCB_AEN) == 0); |
1450 | |
1451 | s = splbio(); |
1452 | while (__predict_false((ccb = |
1453 | SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) { |
1454 | sc->sc_flags |= TWEF_WAIT_CCB; |
		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
1456 | } |
1457 | SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist); |
1458 | #ifdef DIAGNOSTIC |
1459 | if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0) |
		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
1461 | (long)(ccb - sc->sc_ccbs)); |
1462 | flags |= TWE_CCB_ALLOCED; |
1463 | #endif |
1464 | splx(s); |
1465 | |
1466 | twe_ccb_init(sc, ccb, flags); |
1467 | return (ccb); |
1468 | } |
1469 | |
1470 | /* |
1471 | * Free a CCB. |
1472 | */ |
1473 | void |
1474 | twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb) |
1475 | { |
1476 | int s; |
1477 | |
1478 | s = splbio(); |
1479 | if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) { |
1480 | SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist); |
1481 | if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) { |
1482 | sc->sc_flags &= ~TWEF_WAIT_CCB; |
1483 | wakeup(&sc->sc_ccb_freelist); |
1484 | } |
1485 | } |
1486 | ccb->ccb_flags = 0; |
1487 | splx(s); |
1488 | } |
1489 | |
1490 | /* |
1491 | * Map the specified CCB's command block and data buffer (if any) into |
1492 | * controller visible space. Perform DMA synchronisation. |
1493 | */ |
1494 | int |
1495 | twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb) |
1496 | { |
1497 | struct twe_cmd *tc; |
1498 | int flags, nsegs, i, s, rv; |
1499 | void *data; |
1500 | |
	/*
	 * The data as a whole must be 512-byte aligned.  If the caller's
	 * buffer is not, bounce it through a temporary allocation.
	 */
1504 | if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) { |
1505 | s = splvm(); |
1506 | /* XXX */ |
1507 | rv = uvm_km_kmem_alloc(kmem_va_arena, |
1508 | ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT), |
1509 | (vmem_addr_t *)&ccb->ccb_abuf); |
1510 | splx(s); |
1511 | data = (void *)ccb->ccb_abuf; |
1512 | if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0) |
1513 | memcpy(data, ccb->ccb_data, ccb->ccb_datasize); |
1514 | } else { |
1515 | ccb->ccb_abuf = (vaddr_t)0; |
1516 | data = ccb->ccb_data; |
1517 | } |
1518 | |
1519 | /* |
1520 | * Map the data buffer into bus space and build the S/G list. |
1521 | */ |
1522 | rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data, |
1523 | ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | |
1524 | ((ccb->ccb_flags & TWE_CCB_DATA_IN) ? |
1525 | BUS_DMA_READ : BUS_DMA_WRITE)); |
1526 | if (rv != 0) { |
1527 | if (ccb->ccb_abuf != (vaddr_t)0) { |
1528 | s = splvm(); |
1529 | /* XXX */ |
1530 | uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf, |
1531 | ccb->ccb_datasize); |
1532 | splx(s); |
1533 | } |
1534 | return (rv); |
1535 | } |
1536 | |
1537 | nsegs = ccb->ccb_dmamap_xfer->dm_nsegs; |
1538 | tc = ccb->ccb_cmd; |
1539 | tc->tc_size += 2 * nsegs; |
1540 | |
1541 | /* The location of the S/G list is dependent upon command type. */ |
1542 | switch (tc->tc_opcode >> 5) { |
1543 | case 2: |
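		/* Parameter (GET/SET_PARAM) commands: S/G list lives in tc_args.param. */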
1544 | for (i = 0; i < nsegs; i++) { |
1545 | tc->tc_args.param.sgl[i].tsg_address = |
1546 | htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr); |
1547 | tc->tc_args.param.sgl[i].tsg_length = |
1548 | htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len); |
1549 | } |
1550 | /* XXX Needed? */ |
1551 | for (; i < TWE_SG_SIZE; i++) { |
1552 | tc->tc_args.param.sgl[i].tsg_address = 0; |
1553 | tc->tc_args.param.sgl[i].tsg_length = 0; |
1554 | } |
1555 | break; |
1556 | case 3: |
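		/* Block I/O commands: S/G list lives in tc_args.io. */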
1557 | for (i = 0; i < nsegs; i++) { |
1558 | tc->tc_args.io.sgl[i].tsg_address = |
1559 | htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr); |
1560 | tc->tc_args.io.sgl[i].tsg_length = |
1561 | htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len); |
1562 | } |
1563 | /* XXX Needed? */ |
1564 | for (; i < TWE_SG_SIZE; i++) { |
1565 | tc->tc_args.io.sgl[i].tsg_address = 0; |
1566 | tc->tc_args.io.sgl[i].tsg_length = 0; |
1567 | } |
1568 | break; |
1569 | default: |
1570 | /* |
1571 | * In all likelihood, this is a command passed from |
1572 | * management tools in userspace where no S/G list is |
1573 | * necessary because no data is being passed. |
1574 | */ |
1575 | break; |
1576 | } |
1577 | |
1578 | if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0) |
1579 | flags = BUS_DMASYNC_PREREAD; |
1580 | else |
1581 | flags = 0; |
1582 | if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0) |
1583 | flags |= BUS_DMASYNC_PREWRITE; |
1584 | |
1585 | bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0, |
1586 | ccb->ccb_datasize, flags); |
1587 | return (0); |
1588 | } |
1589 | |
1590 | /* |
1591 | * Unmap the specified CCB's command block and data buffer (if any) and |
1592 | * perform DMA synchronisation. |
1593 | */ |
1594 | void |
1595 | twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb) |
1596 | { |
1597 | int flags, s; |
1598 | |
1599 | if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0) |
1600 | flags = BUS_DMASYNC_POSTREAD; |
1601 | else |
1602 | flags = 0; |
1603 | if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0) |
1604 | flags |= BUS_DMASYNC_POSTWRITE; |
1605 | |
1606 | bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0, |
1607 | ccb->ccb_datasize, flags); |
1608 | bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer); |
1609 | |
1610 | if (ccb->ccb_abuf != (vaddr_t)0) { |
1611 | if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0) |
1612 | memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf, |
1613 | ccb->ccb_datasize); |
1614 | s = splvm(); |
1615 | /* XXX */ |
1616 | uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf, |
1617 | ccb->ccb_datasize); |
1618 | splx(s); |
1619 | } |
1620 | } |
1621 | |
1622 | /* |
1623 | * Submit a command to the controller and poll on completion. Return |
1624 | * non-zero on timeout (but don't check status, as some command types don't |
1625 | * return status). Must be called with interrupts blocked. |
1626 | */ |
1627 | int |
1628 | twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo) |
1629 | { |
1630 | int rv; |
1631 | |
1632 | if ((rv = twe_ccb_submit(sc, ccb)) != 0) |
1633 | return (rv); |
1634 | |
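	/* Each unit of 'timo' allows 1000 polls at 100us intervals. */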
1635 | for (timo *= 1000; timo != 0; timo--) { |
1636 | twe_poll(sc); |
1637 | if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0) |
1638 | break; |
1639 | DELAY(100); |
1640 | } |
1641 | |
1642 | return (timo == 0); |
1643 | } |
1644 | |
1645 | /* |
1646 | * If a CCB is specified, enqueue it. Pull CCBs off the software queue in |
1647 | * the order that they were enqueued and try to submit their command blocks |
1648 | * to the controller for execution. |
1649 | */ |
1650 | void |
1651 | twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb) |
1652 | { |
1653 | int s; |
1654 | |
1655 | s = splbio(); |
1656 | |
1657 | if (ccb != NULL) |
1658 | SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq); |
1659 | |
1660 | while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) { |
1661 | if (twe_ccb_submit(sc, ccb)) |
1662 | break; |
1663 | SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq); |
1664 | } |
1665 | |
1666 | splx(s); |
1667 | } |
1668 | |
1669 | /* |
1670 | * Submit the command block associated with the specified CCB to the |
1671 | * controller for execution. Must be called with interrupts blocked. |
1672 | */ |
1673 | int |
1674 | twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb) |
1675 | { |
1676 | bus_addr_t pa; |
1677 | int rv; |
1678 | u_int status; |
1679 | |
1680 | /* Check to see if we can post a command. */ |
1681 | status = twe_inl(sc, TWE_REG_STS); |
1682 | twe_status_check(sc, status); |
1683 | |
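	/*
	 * Commands are posted by writing the bus address of the command
	 * block to the command queue register.
	 */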
1684 | if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) { |
1685 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1686 | (char *)ccb->ccb_cmd - (char *)sc->sc_cmds, |
1687 | sizeof(struct twe_cmd), |
1688 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1689 | #ifdef DIAGNOSTIC |
1690 | if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0) |
			panic("%s: CCB %ld not ALLOCED\n",
1692 | device_xname(sc->sc_dev), (long)(ccb - sc->sc_ccbs)); |
1693 | #endif |
1694 | ccb->ccb_flags |= TWE_CCB_ACTIVE; |
1695 | pa = sc->sc_cmds_paddr + |
1696 | ccb->ccb_cmdid * sizeof(struct twe_cmd); |
1697 | twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa); |
1698 | rv = 0; |
1699 | } else |
1700 | rv = EBUSY; |
1701 | |
1702 | return (rv); |
1703 | } |
1704 | |
1705 | |
1706 | /* |
1707 | * Accept an open operation on the control device. |
1708 | */ |
1709 | static int |
1710 | tweopen(dev_t dev, int flag, int mode, struct lwp *l) |
1711 | { |
1712 | struct twe_softc *twe; |
1713 | |
1714 | if ((twe = device_lookup_private(&twe_cd, minor(dev))) == NULL) |
1715 | return (ENXIO); |
1716 | if ((twe->sc_flags & TWEF_OPEN) != 0) |
1717 | return (EBUSY); |
1718 | |
1719 | twe->sc_flags |= TWEF_OPEN; |
1720 | return (0); |
1721 | } |
1722 | |
1723 | /* |
1724 | * Accept the last close on the control device. |
1725 | */ |
1726 | static int |
1727 | tweclose(dev_t dev, int flag, int mode, |
1728 | struct lwp *l) |
1729 | { |
1730 | struct twe_softc *twe; |
1731 | |
1732 | twe = device_lookup_private(&twe_cd, minor(dev)); |
1733 | twe->sc_flags &= ~TWEF_OPEN; |
1734 | return (0); |
1735 | } |
1736 | |
1737 | void |
1738 | twe_ccb_wait_handler(struct twe_ccb *ccb, int error) |
1739 | { |
1740 | |
1741 | /* Just wake up the sleeper. */ |
1742 | wakeup(ccb); |
1743 | } |
1744 | |
1745 | /* |
1746 | * Handle control operations. |
1747 | */ |
1748 | static int |
1749 | tweioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) |
1750 | { |
1751 | struct twe_softc *twe; |
1752 | struct twe_ccb *ccb; |
1753 | struct twe_param *param; |
1754 | struct twe_usercommand *tu; |
1755 | struct twe_paramcommand *tp; |
1756 | struct twe_drivecommand *td; |
1757 | void *pdata = NULL; |
1758 | int s, error = 0; |
1759 | u_int8_t cmdid; |
1760 | |
1761 | twe = device_lookup_private(&twe_cd, minor(dev)); |
1762 | tu = (struct twe_usercommand *)data; |
1763 | tp = (struct twe_paramcommand *)data; |
1764 | td = (struct twe_drivecommand *)data; |
1765 | |
1766 | /* This is intended to be compatible with the FreeBSD interface. */ |
1767 | switch (cmd) { |
1768 | case TWEIO_COMMAND: |
1769 | error = kauth_authorize_device_passthru(l->l_cred, dev, |
1770 | KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data); |
1771 | if (error) |
1772 | return (error); |
1773 | |
1774 | /* XXX mutex */ |
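		/*
		 * A user command may carry at most one sector of data,
		 * which is staged through a kernel bounce buffer.
		 */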
1775 | if (tu->tu_size > 0) { |
1776 | /* |
1777 | * XXX Handle > TWE_SECTOR_SIZE? Let's see if |
1778 | * it's really necessary, first. |
1779 | */ |
1780 | if (tu->tu_size > TWE_SECTOR_SIZE) { |
1781 | #ifdef TWE_DEBUG |
				printf("%s: TWEIO_COMMAND: tu_size = %zu\n",
1783 | device_xname(twe->sc_dev), tu->tu_size); |
1784 | #endif |
1785 | return EINVAL; |
1786 | } |
1787 | pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK); |
1788 | error = copyin(tu->tu_data, pdata, tu->tu_size); |
1789 | if (error != 0) |
1790 | goto done; |
1791 | ccb = twe_ccb_alloc_wait(twe, |
1792 | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT); |
1793 | KASSERT(ccb != NULL); |
1794 | ccb->ccb_data = pdata; |
1795 | ccb->ccb_datasize = TWE_SECTOR_SIZE; |
1796 | } else { |
1797 | ccb = twe_ccb_alloc_wait(twe, 0); |
1798 | KASSERT(ccb != NULL); |
1799 | } |
1800 | |
1801 | ccb->ccb_tx.tx_handler = twe_ccb_wait_handler; |
1802 | ccb->ccb_tx.tx_context = NULL; |
1803 | ccb->ccb_tx.tx_dv = twe->sc_dev; |
1804 | |
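		/*
		 * Copy in the caller's command block, but preserve this
		 * CCB's own command ID so the completion interrupt is
		 * matched back to the right CCB.
		 */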
1805 | cmdid = ccb->ccb_cmdid; |
1806 | memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd)); |
1807 | ccb->ccb_cmd->tc_cmdid = cmdid; |
1808 | |
1809 | /* Map the transfer. */ |
1810 | if ((error = twe_ccb_map(twe, ccb)) != 0) { |
1811 | twe_ccb_free(twe, ccb); |
1812 | goto done; |
1813 | } |
1814 | |
1815 | /* Submit the command and wait up to 1 minute. */ |
1816 | error = 0; |
1817 | twe_ccb_enqueue(twe, ccb); |
1818 | s = splbio(); |
1819 | while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0) |
			if ((error = tsleep(ccb, PRIBIO, "tweioctl",
1821 | 60 * hz)) != 0) |
1822 | break; |
1823 | splx(s); |
1824 | |
1825 | /* Copy the command back to the ioctl argument. */ |
1826 | memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd)); |
1827 | #ifdef TWE_DEBUG |
1828 | printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, " |
1829 | "tc_status = 0x%02x\n" , device_xname(twe->sc_dev), |
1830 | tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status); |
1831 | #endif |
1832 | |
1833 | s = splbio(); |
1834 | twe_ccb_free(twe, ccb); |
1835 | splx(s); |
1836 | |
1837 | if (tu->tu_size > 0) |
1838 | error = copyout(pdata, tu->tu_data, tu->tu_size); |
1839 | goto done; |
1840 | |
1841 | case TWEIO_STATS: |
1842 | return (ENOENT); |
1843 | |
1844 | case TWEIO_AEN_POLL: |
1845 | s = splbio(); |
1846 | *(u_int *)data = twe_aen_dequeue(twe); |
1847 | splx(s); |
1848 | return (0); |
1849 | |
1850 | case TWEIO_AEN_WAIT: |
1851 | s = splbio(); |
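		/*
		 * Sleep until the interrupt path queues an AEN; setting
		 * TWEF_AENQ_WAIT tells it to wake us via sc_aen_queue.
		 */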
1852 | while ((*(u_int *)data = |
1853 | twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) { |
1854 | twe->sc_flags |= TWEF_AENQ_WAIT; |
1855 | error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH, |
1856 | "tweaen" , 0); |
1857 | if (error == EINTR) { |
1858 | splx(s); |
1859 | return (error); |
1860 | } |
1861 | } |
1862 | splx(s); |
1863 | return (0); |
1864 | |
1865 | case TWEIO_GET_PARAM: |
1866 | error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id, |
		    tp->tp_size, 0, &param);
1868 | if (error != 0) |
1869 | return (error); |
		if (param->tp_param_size > tp->tp_size)
			error = EFAULT;
		else
			error = copyout(param->tp_data, tp->tp_data,
			    param->tp_param_size);
		/* Release the parameter buffer on every path. */
		free(param, M_DEVBUF);
1877 | goto done; |
1878 | |
1879 | case TWEIO_SET_PARAM: |
1880 | pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK); |
1881 | if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0) |
1882 | goto done; |
1883 | error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id, |
1884 | tp->tp_size, pdata); |
1885 | goto done; |
1886 | |
1887 | case TWEIO_RESET: |
1888 | s = splbio(); |
1889 | twe_reset(twe); |
1890 | splx(s); |
1891 | return (0); |
1892 | |
1893 | case TWEIO_ADD_UNIT: |
1894 | /* XXX mutex */ |
1895 | return (twe_add_unit(twe, td->td_unit)); |
1896 | |
1897 | case TWEIO_DEL_UNIT: |
1898 | /* XXX mutex */ |
1899 | return (twe_del_unit(twe, td->td_unit)); |
1900 | |
1901 | default: |
1902 | return EINVAL; |
1903 | } |
1904 | done: |
1905 | if (pdata) |
1906 | free(pdata, M_DEVBUF); |
1907 | return error; |
1908 | } |
1909 | |
1910 | const struct cdevsw twe_cdevsw = { |
1911 | .d_open = tweopen, |
1912 | .d_close = tweclose, |
1913 | .d_read = noread, |
1914 | .d_write = nowrite, |
1915 | .d_ioctl = tweioctl, |
1916 | .d_stop = nostop, |
1917 | .d_tty = notty, |
1918 | .d_poll = nopoll, |
1919 | .d_mmap = nommap, |
1920 | .d_kqfilter = nokqfilter, |
1921 | .d_discard = nodiscard, |
1922 | .d_flag = D_OTHER |
1923 | }; |
1924 | |
1925 | /* |
 * Print some information about the controller.
1927 | */ |
1928 | static void |
1929 | twe_describe_controller(struct twe_softc *sc) |
1930 | { |
1931 | struct twe_param *p[6]; |
1932 | int i, rv = 0; |
1933 | uint32_t dsize; |
1934 | uint8_t ports; |
1935 | |
1936 | ports = 0; |
1937 | |
1938 | /* get the port count */ |
1939 | rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER, |
1940 | TWE_PARAM_CONTROLLER_PortCount, &ports); |
1941 | |
1942 | /* get version strings */ |
1943 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon, |
1944 | 16, NULL, &p[0]); |
1945 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW, |
1946 | 16, NULL, &p[1]); |
1947 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS, |
1948 | 16, NULL, &p[2]); |
1949 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB, |
1950 | 8, NULL, &p[3]); |
1951 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA, |
1952 | 8, NULL, &p[4]); |
1953 | rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI, |
1954 | 8, NULL, &p[5]); |
1955 | |
1956 | if (rv) { |
1957 | /* some error occurred */ |
1958 | aprint_error_dev(sc->sc_dev, |
1959 | "failed to fetch version information\n" ); |
1960 | return; |
1961 | } |
1962 | |
	aprint_normal_dev(sc->sc_dev, "%d ports, Firmware %.16s, BIOS %.16s\n",
1964 | ports, p[1]->tp_data, p[2]->tp_data); |
1965 | |
1966 | aprint_verbose_dev(sc->sc_dev, |
1967 | "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n" , |
1968 | p[0]->tp_data, p[3]->tp_data, |
1969 | p[4]->tp_data, p[5]->tp_data); |
1970 | |
1971 | free(p[0], M_DEVBUF); |
1972 | free(p[1], M_DEVBUF); |
1973 | free(p[2], M_DEVBUF); |
1974 | free(p[3], M_DEVBUF); |
1975 | free(p[4], M_DEVBUF); |
1976 | free(p[5], M_DEVBUF); |
1977 | |
1978 | rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY, |
1979 | TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]); |
1980 | if (rv) { |
1981 | aprint_error_dev(sc->sc_dev, |
1982 | "failed to get drive status summary\n" ); |
1983 | return; |
1984 | } |
1985 | for (i = 0; i < ports; i++) { |
1986 | if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present) |
1987 | continue; |
1988 | rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i, |
1989 | TWE_PARAM_DRIVEINFO_Size, &dsize); |
1990 | if (rv) { |
1991 | aprint_error_dev(sc->sc_dev, |
1992 | "unable to get drive size for port %d\n" , i); |
1993 | continue; |
1994 | } |
1995 | rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i, |
1996 | TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]); |
1997 | if (rv) { |
1998 | aprint_error_dev(sc->sc_dev, |
1999 | "unable to get drive model for port %d\n" , i); |
2000 | continue; |
2001 | } |
		aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
2003 | i, p[1]->tp_data, dsize / 2048); |
2004 | free(p[1], M_DEVBUF); |
2005 | } |
2006 | free(p[0], M_DEVBUF); |
2007 | } |
2008 | |
MODULE(MODULE_CLASS_DRIVER, twe, "pci");
2010 | |
2011 | #ifdef _MODULE |
2012 | #include "ioconf.c" |
2013 | #endif |
2014 | |
2015 | static int |
2016 | twe_modcmd(modcmd_t cmd, void *opaque) |
2017 | { |
2018 | int error = 0; |
2019 | |
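	/*
	 * The autoconf glue below only exists when the driver is built
	 * as a module; a kernel with twe built in attaches it through
	 * its static configuration instead.
	 */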
2020 | #ifdef _MODULE |
2021 | switch (cmd) { |
2022 | case MODULE_CMD_INIT: |
2023 | error = config_init_component(cfdriver_ioconf_twe, |
2024 | cfattach_ioconf_twe, cfdata_ioconf_twe); |
2025 | break; |
2026 | case MODULE_CMD_FINI: |
2027 | error = config_fini_component(cfdriver_ioconf_twe, |
2028 | cfattach_ioconf_twe, cfdata_ioconf_twe); |
2029 | break; |
2030 | default: |
2031 | error = ENOTTY; |
2032 | break; |
2033 | } |
2034 | #endif |
2035 | |
2036 | return error; |
2037 | } |
2038 | |