/*	$NetBSD: icp.c,v 1.31 2012/10/27 17:18:20 chs Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it were not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.31 2012/10/27 17:18:20 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

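	/*
	 * "state" counts how many of the allocation steps below have
	 * completed, so that bail_out: can unwind exactly the resources
	 * that were acquired before a failure.
	 */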
	state = 0;

	if (intrstr != NULL)
		aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout, 0);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error_dev(icp->icp_dv, "malloc() failed\n");
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv,
		    "cache service post-mount init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (raw service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (cache service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump it.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error_dev(icp->icp_dv,
		    "unable to retrieve board info\n");
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal_dev(icp->icp_dv,
	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error_dev(icp->icp_dv,
					    "unable to get chan info");
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	if (icp_count++ == 0)
		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

	return (0);

 bail_out:
	if (state > 4)
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
	if (state > 3)
		free(icp->icp_ccbs, M_DEVBUF);
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and that
	 * no commands be running when this is called.  The ioctl mutex
	 * must also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(icp->icp_dv,
		    "icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}

void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(icp->icp_dv));
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d >= new cdev_cnt (%d)\n",
			    device_xname(icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

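	/*
	 * Divide the usable CCBs (minus a small reserve for internal
	 * commands) evenly among all attached devices, and push the new
	 * count down to each child via its service callback.
	 */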
	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    device_xname(icp->icp_dv), icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

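	/*
	 * Periodically poll the controller for completions that may have
	 * been missed and restart queue processing, then re-arm the
	 * callout.
	 */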
	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

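	/*
	 * Dispatch on the returned index: the first two values are
	 * reserved for asynchronous and "special" events; anything else
	 * is the ident of an active CCB (array index + 2).
	 */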
	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		aprint_error_dev(icp->icp_dv,
		    "uninitialized or unknown service (%d/%d)\n",
		    ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

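/*
 * Per-request context for user (ioctl) commands: the gdt_ucmd_t being
 * executed and the number of bytes bounced through the scratch area.
 */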
struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = device_private(ic->ic_dv);
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

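	/*
	 * Build and poll an internal command synchronously, retrying up
	 * to ICP_RETRIES times on failure.  IOCTL-type cache service
	 * commands transfer their data through the controller scratch
	 * area.
	 */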
	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

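	/*
	 * User commands are bounced through the controller scratch area,
	 * so the data transfer (ioctl buffer, block run, or sense data
	 * plus SCSI data) must fit within GDT_SCRATCH_SZ.
	 */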
	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(icp->icp_dv,
			    "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(icp->icp_dv,
		    "error %d waiting for ucmd to complete\n", error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
	    SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	for (; icp->icp_qfreeze == 0;) {
		if (__predict_false((ic =
		    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
		    SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

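	/*
	 * Translate the DMA segments into the controller's little-endian
	 * scatter/gather format.
	 */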
	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

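	/*
	 * Spin for up to ICP_BUSY_WAIT_MS milliseconds (100 polls of
	 * 10us each per millisecond) waiting for the controller to
	 * accept a new command.  Note that this reuses the "timo"
	 * argument as the poll counter.
	 */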
	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", device_xname(icp->icp_dv));
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
			    mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    device_xname(icp->icp_dv), ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		aprint_error_dev(icp->icp_dv, "command timed out\n");
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

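	/* Round the command length up to a 32-bit boundary. */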
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

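/*
 * Freeze the command queue: bump the freeze count and, on the first
 * freeze, wait for all running commands to drain.  Paired with
 * icp_unfreeze(), which restarts queue processing once the count
 * drops back to zero.
 */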
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller?  XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

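	/*
	 * If the new event is identical to the most recently stored one,
	 * just bump that entry's repeat count and timestamp; otherwise
	 * advance the ring (pushing the oldest entry out if full) and
	 * store the event in a fresh slot.
	 */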
	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

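/*
 * Copy out the event at "handle" (or the oldest event if handle is -1).
 * Returns the handle of the next event, or -1 once the newest event
 * has been read.
 */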
int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

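/*
 * Scan from the oldest event forward for the first one not yet seen
 * by "application", mark it as delivered to that application, and copy
 * it out; clear estr->event_source if no such event exists.
 */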
void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}