1 | /* $NetBSD: hifn7751.c,v 1.61 2015/05/14 07:27:14 maxv Exp $ */ |
2 | /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */ |
3 | /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */ |
4 | |
5 | /* |
6 | * Invertex AEON / Hifn 7751 driver |
7 | * Copyright (c) 1999 Invertex Inc. All rights reserved. |
8 | * Copyright (c) 1999 Theo de Raadt |
9 | * Copyright (c) 2000-2001 Network Security Technologies, Inc. |
10 | * http://www.netsec.net |
11 | * Copyright (c) 2003 Hifn Inc. |
12 | * |
13 | * This driver is based on a previous driver by Invertex, for which they |
14 | * requested: Please send any comments, feedback, bug-fixes, or feature |
15 | * requests to software@invertex.com. |
16 | * |
17 | * Redistribution and use in source and binary forms, with or without |
18 | * modification, are permitted provided that the following conditions |
19 | * are met: |
20 | * |
21 | * 1. Redistributions of source code must retain the above copyright |
22 | * notice, this list of conditions and the following disclaimer. |
23 | * 2. Redistributions in binary form must reproduce the above copyright |
24 | * notice, this list of conditions and the following disclaimer in the |
25 | * documentation and/or other materials provided with the distribution. |
26 | * 3. The name of the author may not be used to endorse or promote products |
27 | * derived from this software without specific prior written permission. |
28 | * |
29 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
30 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
31 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
32 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
33 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
34 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
35 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
36 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
37 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
38 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
39 | * |
40 | * Effort sponsored in part by the Defense Advanced Research Projects |
41 | * Agency (DARPA) and Air Force Research Laboratory, Air Force |
42 | * Materiel Command, USAF, under agreement number F30602-01-2-0537. |
43 | * |
44 | */ |
45 | |
46 | /* |
47 | * Driver for various Hifn pre-HIPP encryption processors. |
48 | */ |
49 | |
50 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.61 2015/05/14 07:27:14 maxv Exp $");
52 | |
53 | #include <sys/param.h> |
54 | #include <sys/systm.h> |
55 | #include <sys/mutex.h> |
56 | #include <sys/proc.h> |
57 | #include <sys/errno.h> |
58 | #include <sys/malloc.h> |
59 | #include <sys/kernel.h> |
60 | #include <sys/mbuf.h> |
61 | #include <sys/device.h> |
62 | #include <sys/module.h> |
63 | |
64 | #ifdef __OpenBSD__ |
65 | #include <crypto/crypto.h> |
66 | #include <dev/rndvar.h> |
67 | #else |
68 | #include <opencrypto/cryptodev.h> |
69 | #include <sys/cprng.h> |
70 | #include <sys/rndpool.h> |
71 | #include <sys/rndsource.h> |
72 | #include <sys/sha1.h> |
73 | #endif |
74 | |
75 | #include <dev/pci/pcireg.h> |
76 | #include <dev/pci/pcivar.h> |
77 | #include <dev/pci/pcidevs.h> |
78 | |
79 | #include <dev/pci/hifn7751reg.h> |
80 | #include <dev/pci/hifn7751var.h> |
81 | |
82 | #undef HIFN_DEBUG |
83 | |
84 | #ifdef __NetBSD__ |
85 | #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */ |
86 | #endif |
87 | |
88 | #ifdef HIFN_DEBUG |
89 | extern int hifn_debug; /* patchable */ |
90 | int hifn_debug = 1; |
91 | #endif |
92 | |
93 | #ifdef __OpenBSD__ |
94 | #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */ |
95 | #endif |
96 | |
97 | /* |
98 | * Prototypes and count for the pci_device structure |
99 | */ |
100 | #ifdef __OpenBSD__ |
static int hifn_probe(struct device *, void *, void *);
102 | #else |
103 | static int hifn_probe(device_t, cfdata_t, void *); |
104 | #endif |
105 | static void hifn_attach(device_t, device_t, void *); |
106 | #ifdef __NetBSD__ |
107 | static int hifn_detach(device_t, int); |
108 | |
109 | CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc), |
110 | hifn_probe, hifn_attach, hifn_detach, NULL); |
111 | #else |
112 | CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc), |
113 | hifn_probe, hifn_attach, NULL, NULL); |
114 | #endif |
115 | |
116 | #ifdef __OpenBSD__ |
117 | struct cfdriver hifn_cd = { |
118 | 0, "hifn" , DV_DULL |
119 | }; |
120 | #endif |
121 | |
122 | static void hifn_reset_board(struct hifn_softc *, int); |
123 | static void hifn_reset_puc(struct hifn_softc *); |
124 | static void hifn_puc_wait(struct hifn_softc *); |
125 | static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); |
126 | static void hifn_set_retry(struct hifn_softc *); |
127 | static void hifn_init_dma(struct hifn_softc *); |
128 | static void hifn_init_pci_registers(struct hifn_softc *); |
129 | static int hifn_sramsize(struct hifn_softc *); |
130 | static int hifn_dramsize(struct hifn_softc *); |
131 | static int hifn_ramtype(struct hifn_softc *); |
132 | static void hifn_sessions(struct hifn_softc *); |
133 | static int hifn_intr(void *); |
134 | static u_int hifn_write_command(struct hifn_command *, u_int8_t *); |
135 | static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); |
136 | static int hifn_newsession(void*, u_int32_t *, struct cryptoini *); |
137 | static int hifn_freesession(void*, u_int64_t); |
138 | static int hifn_process(void*, struct cryptop *, int); |
139 | static void hifn_callback(struct hifn_softc *, struct hifn_command *, |
140 | u_int8_t *); |
141 | static int hifn_crypto(struct hifn_softc *, struct hifn_command *, |
142 | struct cryptop*, int); |
143 | static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); |
144 | static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); |
145 | static int hifn_dmamap_aligned(bus_dmamap_t); |
146 | static int hifn_dmamap_load_src(struct hifn_softc *, |
147 | struct hifn_command *); |
148 | static int hifn_dmamap_load_dst(struct hifn_softc *, |
149 | struct hifn_command *); |
150 | static int hifn_init_pubrng(struct hifn_softc *); |
151 | static void hifn_rng(void *); |
152 | static void hifn_rng_locked(void *); |
153 | static void hifn_tick(void *); |
154 | static void hifn_abort(struct hifn_softc *); |
155 | static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, |
156 | int *); |
157 | static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t); |
158 | static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); |
159 | #ifdef HAVE_CRYPTO_LZS |
160 | static int hifn_compression(struct hifn_softc *, struct cryptop *, |
161 | struct hifn_command *); |
162 | static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); |
163 | static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); |
164 | static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, |
165 | u_int8_t *); |
166 | #endif /* HAVE_CRYPTO_LZS */ |
167 | |
168 | struct hifn_stats hifnstats; |
169 | |
170 | static const struct hifn_product { |
171 | pci_vendor_id_t hifn_vendor; |
172 | pci_product_id_t hifn_product; |
173 | int hifn_flags; |
174 | const char *hifn_name; |
175 | } hifn_products[] = { |
	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	{ 0, 0,
	  0,
	  NULL
	}
215 | }; |
216 | |
217 | static const struct hifn_product * |
218 | hifn_lookup(const struct pci_attach_args *pa) |
219 | { |
220 | const struct hifn_product *hp; |
221 | |
222 | for (hp = hifn_products; hp->hifn_name != NULL; hp++) { |
223 | if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor && |
224 | PCI_PRODUCT(pa->pa_id) == hp->hifn_product) |
225 | return (hp); |
226 | } |
227 | return (NULL); |
228 | } |
229 | |
230 | static int |
231 | hifn_probe(device_t parent, cfdata_t match, void *aux) |
232 | { |
233 | struct pci_attach_args *pa = aux; |
234 | |
235 | if (hifn_lookup(pa) != NULL) |
236 | return 1; |
237 | |
238 | return 0; |
239 | } |
240 | |
241 | static void |
242 | hifn_attach(device_t parent, device_t self, void *aux) |
243 | { |
244 | struct hifn_softc *sc = device_private(self); |
245 | struct pci_attach_args *pa = aux; |
246 | const struct hifn_product *hp; |
247 | pci_chipset_tag_t pc = pa->pa_pc; |
248 | pci_intr_handle_t ih; |
249 | const char *intrstr = NULL; |
250 | const char *hifncap; |
251 | char rbase; |
252 | #ifdef __NetBSD__ |
253 | #define iosize0 sc->sc_iosz0 |
254 | #define iosize1 sc->sc_iosz1 |
255 | #else |
256 | bus_size_t iosize0, iosize1; |
257 | #endif |
258 | u_int32_t cmd; |
259 | u_int16_t ena; |
260 | bus_dma_segment_t seg; |
261 | bus_dmamap_t dmamap; |
262 | int rseg; |
263 | void *kva; |
264 | char intrbuf[PCI_INTRSTR_LEN]; |
265 | |
266 | hp = hifn_lookup(pa); |
267 | if (hp == NULL) { |
		printf("\n");
		panic("hifn_attach: impossible");
270 | } |
271 | |
	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);
273 | |
274 | sc->sc_dv = self; |
275 | sc->sc_pci_pc = pa->pa_pc; |
276 | sc->sc_pci_tag = pa->pa_tag; |
277 | |
278 | sc->sc_flags = hp->hifn_flags; |
279 | |
280 | cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
281 | cmd |= PCI_COMMAND_MASTER_ENABLE; |
282 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); |
283 | |
284 | if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0, |
285 | &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) { |
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
287 | return; |
288 | } |
289 | |
290 | if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0, |
291 | &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) { |
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
293 | goto fail_io0; |
294 | } |
295 | |
296 | hifn_set_retry(sc); |
297 | |
298 | if (sc->sc_flags & HIFN_NO_BURSTWRITE) { |
299 | sc->sc_waw_lastgroup = -1; |
300 | sc->sc_waw_lastreg = 1; |
301 | } |
302 | |
303 | sc->sc_dmat = pa->pa_dmat; |
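
	/*
	 * Allocate one physically contiguous, page-aligned chunk for
	 * struct hifn_dma (the command/source/dest/result descriptor
	 * rings plus the command, result and slop bounce buffers),
	 * map it into kernel virtual memory, then create and load a
	 * DMA map so the chip can be pointed at it.
	 */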
304 | if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0, |
305 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
307 | goto fail_io1; |
308 | } |
309 | if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva, |
310 | BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
312 | (u_long)sizeof(*sc->sc_dma)); |
313 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
314 | goto fail_io1; |
315 | } |
316 | if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1, |
317 | sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) { |
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
319 | bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); |
320 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
321 | goto fail_io1; |
322 | } |
323 | if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma), |
324 | NULL, BUS_DMA_NOWAIT)) { |
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
326 | bus_dmamap_destroy(sc->sc_dmat, dmamap); |
327 | bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); |
328 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
329 | goto fail_io1; |
330 | } |
331 | sc->sc_dmamap = dmamap; |
332 | sc->sc_dma = (struct hifn_dma *)kva; |
333 | memset(sc->sc_dma, 0, sizeof(*sc->sc_dma)); |
334 | |
335 | hifn_reset_board(sc, 0); |
336 | |
337 | if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) { |
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
339 | goto fail_mem; |
340 | } |
341 | hifn_reset_puc(sc); |
342 | |
343 | hifn_init_dma(sc); |
344 | hifn_init_pci_registers(sc); |
345 | |
346 | /* XXX can't dynamically determine ram type for 795x; force dram */ |
347 | if (sc->sc_flags & HIFN_IS_7956) |
348 | sc->sc_drammodel = 1; |
349 | else if (hifn_ramtype(sc)) |
350 | goto fail_mem; |
351 | |
352 | if (sc->sc_drammodel == 0) |
353 | hifn_sramsize(sc); |
354 | else |
355 | hifn_dramsize(sc); |
356 | |
357 | /* |
358 | * Workaround for NetSec 7751 rev A: half ram size because two |
359 | * of the address lines were left floating |
360 | */ |
361 | if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC && |
362 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 && |
363 | PCI_REVISION(pa->pa_class) == 0x61) |
364 | sc->sc_ramsize >>= 1; |
365 | |
366 | if (pci_intr_map(pa, &ih)) { |
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
368 | goto fail_mem; |
369 | } |
370 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
371 | #ifdef __OpenBSD__ |
372 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc, |
373 | device_xname(self)); |
374 | #else |
375 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc); |
376 | #endif |
377 | if (sc->sc_ih == NULL) { |
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
382 | goto fail_mem; |
383 | } |
384 | |
385 | hifn_sessions(sc); |
386 | |
387 | rseg = sc->sc_ramsize / 1024; |
388 | rbase = 'K'; |
389 | if (sc->sc_ramsize >= (1024 * 1024)) { |
390 | rbase = 'M'; |
391 | rseg /= 1024; |
392 | } |
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
394 | hifncap, rseg, rbase, |
395 | sc->sc_drammodel ? 'D' : 'S', intrstr); |
396 | |
397 | sc->sc_cid = crypto_get_driverid(0); |
398 | if (sc->sc_cid < 0) { |
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
400 | goto fail_intr; |
401 | } |
402 | |
403 | WRITE_REG_0(sc, HIFN_0_PUCNFG, |
404 | READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); |
405 | ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; |
406 | |
407 | switch (ena) { |
408 | case HIFN_PUSTAT_ENA_2: |
409 | crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, |
410 | hifn_newsession, hifn_freesession, hifn_process, sc); |
411 | crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, |
412 | hifn_newsession, hifn_freesession, hifn_process, sc); |
413 | if (sc->sc_flags & HIFN_HAS_AES) |
414 | crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, |
415 | hifn_newsession, hifn_freesession, |
416 | hifn_process, sc); |
417 | /*FALLTHROUGH*/ |
418 | case HIFN_PUSTAT_ENA_1: |
419 | crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, |
420 | hifn_newsession, hifn_freesession, hifn_process, sc); |
421 | crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, |
422 | hifn_newsession, hifn_freesession, hifn_process, sc); |
423 | crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0, |
424 | hifn_newsession, hifn_freesession, hifn_process, sc); |
425 | crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0, |
426 | hifn_newsession, hifn_freesession, hifn_process, sc); |
427 | crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, |
428 | hifn_newsession, hifn_freesession, hifn_process, sc); |
429 | break; |
430 | } |
431 | |
432 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, |
433 | sc->sc_dmamap->dm_mapsize, |
434 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
435 | |
436 | mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM); |
437 | |
438 | if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) { |
439 | hifn_init_pubrng(sc); |
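		/*
		 * Ask for enough output to fill the entire entropy
		 * pool on the first pass (RND_POOLBITS bits, NBBY
		 * bits per byte).
		 */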
440 | sc->sc_rng_need = RND_POOLBITS / NBBY; |
441 | } |
442 | |
443 | #ifdef __OpenBSD__ |
444 | timeout_set(&sc->sc_tickto, hifn_tick, sc); |
445 | timeout_add(&sc->sc_tickto, hz); |
446 | #else |
447 | callout_init(&sc->sc_tickto, CALLOUT_MPSAFE); |
448 | callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); |
449 | #endif |
450 | return; |
451 | |
452 | fail_intr: |
453 | pci_intr_disestablish(pc, sc->sc_ih); |
454 | fail_mem: |
455 | bus_dmamap_unload(sc->sc_dmat, dmamap); |
456 | bus_dmamap_destroy(sc->sc_dmat, dmamap); |
457 | bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); |
458 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
459 | |
460 | /* Turn off DMA polling */ |
461 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | |
462 | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); |
463 | |
464 | fail_io1: |
465 | bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1); |
466 | fail_io0: |
467 | bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0); |
468 | } |
469 | |
470 | #ifdef __NetBSD__ |
471 | static int |
472 | hifn_detach(device_t self, int flags) |
473 | { |
474 | struct hifn_softc *sc = device_private(self); |
475 | |
476 | hifn_abort(sc); |
477 | |
478 | hifn_reset_board(sc, 1); |
479 | |
480 | pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih); |
481 | |
482 | crypto_unregister_all(sc->sc_cid); |
483 | |
484 | rnd_detach_source(&sc->sc_rnd_source); |
485 | |
486 | mutex_enter(&sc->sc_mtx); |
487 | callout_halt(&sc->sc_tickto, NULL); |
488 | if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) |
489 | callout_halt(&sc->sc_rngto, NULL); |
490 | mutex_exit(&sc->sc_mtx); |
491 | |
492 | bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1); |
493 | bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0); |
494 | |
495 | /* |
496 | * XXX It's not clear if any additional buffers have been |
497 | * XXX allocated and require free()ing |
498 | */ |
499 | |
500 | return 0; |
501 | } |
502 | |
MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");
504 | |
505 | #ifdef _MODULE |
506 | #include "ioconf.c" |
507 | #endif |
508 | |
509 | static int |
510 | hifn_modcmd(modcmd_t cmd, void *data) |
511 | { |
512 | int error = 0; |
513 | |
	switch (cmd) {
515 | case MODULE_CMD_INIT: |
516 | #ifdef _MODULE |
517 | error = config_init_component(cfdriver_ioconf_hifn, |
518 | cfattach_ioconf_hifn, cfdata_ioconf_hifn); |
519 | #endif |
520 | return error; |
521 | case MODULE_CMD_FINI: |
522 | #ifdef _MODULE |
523 | error = config_fini_component(cfdriver_ioconf_hifn, |
524 | cfattach_ioconf_hifn, cfdata_ioconf_hifn); |
525 | #endif |
526 | return error; |
527 | default: |
528 | return ENOTTY; |
529 | } |
530 | } |
531 | |
532 | #endif /* ifdef __NetBSD__ */ |
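
/*
 * Callback registered with rndsource_setcb(): the kernel entropy pool
 * calls this when it wants more data.  Remember how many bytes were
 * requested and kick the RNG callout immediately; hifn_rng_locked()
 * does the actual register reads.
 */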
533 | |
534 | static void |
535 | hifn_rng_get(size_t bytes, void *priv) |
536 | { |
537 | struct hifn_softc *sc = priv; |
538 | |
539 | mutex_enter(&sc->sc_mtx); |
540 | sc->sc_rng_need = bytes; |
541 | callout_reset(&sc->sc_rngto, 0, hifn_rng, sc); |
542 | mutex_exit(&sc->sc_mtx); |
543 | } |
544 | |
545 | static int |
546 | hifn_init_pubrng(struct hifn_softc *sc) |
547 | { |
548 | u_int32_t r; |
549 | int i; |
550 | |
551 | if ((sc->sc_flags & HIFN_IS_7811) == 0) { |
552 | /* Reset 7951 public key/rng engine */ |
553 | WRITE_REG_1(sc, HIFN_1_PUB_RESET, |
554 | READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); |
555 | |
556 | for (i = 0; i < 100; i++) { |
557 | DELAY(1000); |
558 | if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & |
559 | HIFN_PUBRST_RESET) == 0) |
560 | break; |
561 | } |
562 | |
563 | if (i == 100) { |
			printf("%s: public key init failed\n",
565 | device_xname(sc->sc_dv)); |
566 | return (1); |
567 | } |
568 | } |
569 | |
570 | /* Enable the rng, if available */ |
571 | if (sc->sc_flags & HIFN_HAS_RNG) { |
572 | if (sc->sc_flags & HIFN_IS_7811) { |
573 | r = READ_REG_1(sc, HIFN_1_7811_RNGENA); |
574 | if (r & HIFN_7811_RNGENA_ENA) { |
575 | r &= ~HIFN_7811_RNGENA_ENA; |
576 | WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); |
577 | } |
578 | WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, |
579 | HIFN_7811_RNGCFG_DEFL); |
580 | r |= HIFN_7811_RNGENA_ENA; |
581 | WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); |
582 | } else |
583 | WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, |
584 | READ_REG_1(sc, HIFN_1_RNG_CONFIG) | |
585 | HIFN_RNGCFG_ENA); |
586 | |
587 | /* |
588 | * The Hifn RNG documentation states that at their |
589 | * recommended "conservative" RNG config values, |
590 | * the RNG must warm up for 0.4s before providing |
591 | * data that meet their worst-case estimate of 0.06 |
592 | * bits of random data per output register bit. |
593 | */ |
594 | DELAY(4000); |
595 | |
596 | #ifdef __NetBSD__ |
597 | rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc); |
598 | rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv), |
599 | RND_TYPE_RNG, |
600 | RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB); |
601 | #endif |
602 | |
603 | if (hz >= 100) |
604 | sc->sc_rnghz = hz / 100; |
605 | else |
606 | sc->sc_rnghz = 1; |
607 | #ifdef __OpenBSD__ |
608 | timeout_set(&sc->sc_rngto, hifn_rng, sc); |
609 | #else /* !__OpenBSD__ */ |
610 | callout_init(&sc->sc_rngto, CALLOUT_MPSAFE); |
611 | #endif /* !__OpenBSD__ */ |
612 | } |
613 | |
614 | /* Enable public key engine, if available */ |
615 | if (sc->sc_flags & HIFN_HAS_PUBLIC) { |
616 | WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); |
617 | sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; |
618 | WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); |
619 | } |
620 | |
621 | /* Call directly into the RNG once to prime the pool. */ |
622 | hifn_rng(sc); /* Sets callout/timeout at end */ |
623 | |
624 | return (0); |
625 | } |
626 | |
627 | static void |
628 | hifn_rng_locked(void *vsc) |
629 | { |
630 | struct hifn_softc *sc = vsc; |
631 | #ifdef __NetBSD__ |
632 | uint32_t num[64]; |
633 | #else |
634 | uint32_t num[2]; |
635 | #endif |
636 | uint32_t sts; |
637 | int i; |
638 | size_t got, gotent; |
639 | |
640 | if (sc->sc_rng_need < 1) { |
641 | callout_stop(&sc->sc_rngto); |
642 | return; |
643 | } |
644 | |
645 | if (sc->sc_flags & HIFN_IS_7811) { |
646 | for (i = 0; i < 5; i++) { /* XXX why 5? */ |
647 | sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); |
648 | if (sts & HIFN_7811_RNGSTS_UFL) { |
				printf("%s: RNG underflow: disabling\n",
650 | device_xname(sc->sc_dv)); |
651 | return; |
652 | } |
653 | if ((sts & HIFN_7811_RNGSTS_RDY) == 0) |
654 | break; |
655 | |
656 | /* |
657 | * There are at least two words in the RNG FIFO |
658 | * at this point. |
659 | */ |
660 | num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); |
661 | num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); |
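			/*
			 * Credit one bit of entropy for every
			 * HIFN_RNG_BITSPER output bits read; "gotent"
			 * is the estimate handed to rnd_add_data()
			 * below.
			 */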
662 | got = 2 * sizeof(num[0]); |
663 | gotent = (got * NBBY) / HIFN_RNG_BITSPER; |
664 | |
665 | #ifdef __NetBSD__ |
666 | rnd_add_data(&sc->sc_rnd_source, num, got, gotent); |
667 | sc->sc_rng_need -= gotent; |
668 | #else |
669 | /* |
670 | * XXX This is a really bad idea. |
671 | * XXX Hifn estimate as little as 0.06 |
672 | * XXX actual bits of entropy per output |
673 | * XXX register bit. How can we tell the |
674 | * XXX kernel RNG subsystem we're handing |
675 | * XXX it 64 "true" random bits, for any |
676 | * XXX sane value of "true"? |
677 | * XXX |
678 | * XXX The right thing to do here, if we |
679 | * XXX cannot supply an estimate ourselves, |
680 | * XXX would be to hash the bits locally. |
681 | */ |
682 | add_true_randomness(num[0]); |
683 | add_true_randomness(num[1]); |
684 | #endif |
685 | |
686 | } |
687 | } else { |
688 | int nwords = 0; |
689 | |
690 | if (sc->sc_rng_need) { |
691 | nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER; |
692 | nwords = MIN(__arraycount(num), nwords); |
693 | } |
694 | |
695 | if (nwords < 2) { |
696 | nwords = 2; |
697 | } |
698 | |
699 | /* |
700 | * We must be *extremely* careful here. The Hifn |
701 | * 795x differ from the published 6500 RNG design |
702 | * in more ways than the obvious lack of the output |
703 | * FIFO and LFSR control registers. In fact, there |
704 | * is only one LFSR, instead of the 6500's two, and |
705 | * it's 32 bits, not 31. |
706 | * |
707 | * Further, a block diagram obtained from Hifn shows |
708 | * a very curious latching of this register: the LFSR |
709 | * rotates at a frequency of RNG_Clk / 8, but the |
710 | * RNG_Data register is latched at a frequency of |
711 | * RNG_Clk, which means that it is possible for |
712 | * consecutive reads of the RNG_Data register to read |
713 | * identical state from the LFSR. The simplest |
714 | * workaround seems to be to read eight samples from |
715 | * the register for each one that we use. Since each |
716 | * read must require at least one PCI cycle, and |
717 | * RNG_Clk is at least PCI_Clk, this is safe. |
718 | */ |
		for (i = 0; i < nwords * 8; i++) {
			volatile u_int32_t regtmp;

			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}
725 | |
726 | got = nwords * sizeof(num[0]); |
727 | gotent = (got * NBBY) / HIFN_RNG_BITSPER; |
728 | #ifdef __NetBSD__ |
729 | rnd_add_data(&sc->sc_rnd_source, num, got, gotent); |
730 | sc->sc_rng_need -= gotent; |
731 | #else |
732 | /* XXX a bad idea; see 7811 block above */ |
733 | add_true_randomness(num[0]); |
734 | #endif |
735 | } |
736 | |
737 | #ifdef __OpenBSD__ |
738 | timeout_add(&sc->sc_rngto, sc->sc_rnghz); |
739 | #else |
740 | if (sc->sc_rng_need > 0) { |
741 | callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); |
742 | } |
743 | #endif |
744 | } |
745 | |
746 | static void |
747 | hifn_rng(void *vsc) |
748 | { |
749 | struct hifn_softc *sc = vsc; |
750 | |
751 | mutex_spin_enter(&sc->sc_mtx); |
752 | hifn_rng_locked(vsc); |
753 | mutex_spin_exit(&sc->sc_mtx); |
754 | } |
755 | |
756 | static void |
757 | hifn_puc_wait(struct hifn_softc *sc) |
758 | { |
759 | int i; |
760 | |
761 | for (i = 5000; i > 0; i--) { |
762 | DELAY(1); |
763 | if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET)) |
764 | break; |
765 | } |
766 | if (!i) |
		printf("%s: proc unit did not reset\n",
		    device_xname(sc->sc_dv));
768 | } |
769 | |
770 | /* |
771 | * Reset the processing unit. |
772 | */ |
773 | static void |
774 | hifn_reset_puc(struct hifn_softc *sc) |
775 | { |
776 | /* Reset processing unit */ |
777 | WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); |
778 | hifn_puc_wait(sc); |
779 | } |
780 | |
781 | static void |
782 | hifn_set_retry(struct hifn_softc *sc) |
783 | { |
784 | u_int32_t r; |
785 | |
786 | r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT); |
787 | r &= 0xffff0000; |
788 | pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r); |
789 | } |
790 | |
791 | /* |
 * Resets the board.  Values in the registers are left as is
793 | * from the reset (i.e. initial values are assigned elsewhere). |
794 | */ |
795 | static void |
796 | hifn_reset_board(struct hifn_softc *sc, int full) |
797 | { |
798 | u_int32_t reg; |
799 | |
800 | /* |
801 | * Set polling in the DMA configuration register to zero. 0x7 avoids |
802 | * resetting the board and zeros out the other fields. |
803 | */ |
804 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | |
805 | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); |
806 | |
807 | /* |
808 | * Now that polling has been disabled, we have to wait 1 ms |
809 | * before resetting the board. |
810 | */ |
811 | DELAY(1000); |
812 | |
813 | /* Reset the DMA unit */ |
814 | if (full) { |
815 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); |
816 | DELAY(1000); |
817 | } else { |
818 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, |
819 | HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); |
820 | hifn_reset_puc(sc); |
821 | } |
822 | |
823 | memset(sc->sc_dma, 0, sizeof(*sc->sc_dma)); |
824 | |
825 | /* Bring dma unit out of reset */ |
826 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | |
827 | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); |
828 | |
829 | hifn_puc_wait(sc); |
830 | |
831 | hifn_set_retry(sc); |
832 | |
833 | if (sc->sc_flags & HIFN_IS_7811) { |
834 | for (reg = 0; reg < 1000; reg++) { |
835 | if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & |
836 | HIFN_MIPSRST_CRAMINIT) |
837 | break; |
838 | DELAY(1000); |
839 | } |
840 | if (reg == 1000) |
			printf(": cram init timeout\n");
842 | } |
843 | } |
844 | |
845 | static u_int32_t |
846 | hifn_next_signature(u_int32_t a, u_int cnt) |
847 | { |
848 | int i; |
849 | u_int32_t v; |
850 | |
851 | for (i = 0; i < cnt; i++) { |
852 | |
853 | /* get the parity */ |
854 | v = a & 0x80080125; |
855 | v ^= v >> 16; |
856 | v ^= v >> 8; |
857 | v ^= v >> 4; |
858 | v ^= v >> 2; |
859 | v ^= v >> 1; |
860 | |
861 | a = (v & 1) ^ (a << 1); |
862 | } |
863 | |
864 | return a; |
865 | } |
866 | |
867 | static struct pci2id { |
868 | u_short pci_vendor; |
869 | u_short pci_prod; |
870 | char card_id[13]; |
871 | } const pci2id[] = { |
872 | { |
873 | PCI_VENDOR_HIFN, |
874 | PCI_PRODUCT_HIFN_7951, |
875 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
876 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
877 | }, { |
878 | PCI_VENDOR_HIFN, |
879 | PCI_PRODUCT_HIFN_7955, |
880 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
881 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
882 | }, { |
883 | PCI_VENDOR_HIFN, |
884 | PCI_PRODUCT_HIFN_7956, |
885 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
886 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
887 | }, { |
888 | PCI_VENDOR_NETSEC, |
889 | PCI_PRODUCT_NETSEC_7751, |
890 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
891 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
892 | }, { |
893 | PCI_VENDOR_INVERTEX, |
894 | PCI_PRODUCT_INVERTEX_AEON, |
895 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
896 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
897 | }, { |
898 | PCI_VENDOR_HIFN, |
899 | PCI_PRODUCT_HIFN_7811, |
900 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
901 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
902 | }, { |
903 | /* |
904 | * Other vendors share this PCI ID as well, such as |
905 | * http://www.powercrypt.com, and obviously they also |
906 | * use the same key. |
907 | */ |
908 | PCI_VENDOR_HIFN, |
909 | PCI_PRODUCT_HIFN_7751, |
910 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
911 | 0x00, 0x00, 0x00, 0x00, 0x00 } |
912 | }, |
913 | }; |
914 | |
915 | /* |
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
917 | * "hifn_enable_crypto" is called to enable it. The check is important, |
918 | * as enabling crypto twice will lock the board. |
919 | */ |
920 | static const char * |
921 | hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid) |
922 | { |
923 | u_int32_t dmacfg, ramcfg, encl, addr, i; |
924 | const char *offtbl = NULL; |
925 | |
926 | for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { |
927 | if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) && |
928 | pci2id[i].pci_prod == PCI_PRODUCT(pciid)) { |
929 | offtbl = pci2id[i].card_id; |
930 | break; |
931 | } |
932 | } |
933 | |
934 | if (offtbl == NULL) { |
935 | #ifdef HIFN_DEBUG |
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
937 | #endif |
938 | return (NULL); |
939 | } |
940 | |
941 | ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); |
942 | dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); |
943 | |
944 | /* |
945 | * The RAM config register's encrypt level bit needs to be set before |
946 | * every read performed on the encryption level register. |
947 | */ |
948 | WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); |
949 | |
950 | encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; |
951 | |
952 | /* |
	 * Make sure we don't re-unlock.  Two unlocks kill the chip until
	 * the next reboot.
955 | */ |
956 | if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { |
957 | #ifdef HIFN_DEBUG |
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
959 | #endif |
960 | goto report; |
961 | } |
962 | |
963 | if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { |
964 | #ifdef HIFN_DEBUG |
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
966 | #endif |
967 | return (NULL); |
968 | } |
969 | |
970 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | |
971 | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); |
972 | DELAY(1000); |
973 | addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1); |
974 | DELAY(1000); |
975 | WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0); |
976 | DELAY(1000); |
977 | |
978 | for (i = 0; i <= 12; i++) { |
979 | addr = hifn_next_signature(addr, offtbl[i] + 0x101); |
980 | WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr); |
981 | |
982 | DELAY(1000); |
983 | } |
984 | |
985 | WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); |
986 | encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; |
987 | |
988 | #ifdef HIFN_DEBUG |
989 | if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) |
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
993 | #endif |
994 | |
995 | report: |
996 | WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); |
997 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); |
998 | |
999 | switch (encl) { |
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
1014 | } |
1015 | /* NOTREACHED */ |
1016 | } |
1017 | |
1018 | /* |
1019 | * Give initial values to the registers listed in the "Register Space" |
1020 | * section of the HIFN Software Development reference manual. |
1021 | */ |
1022 | static void |
1023 | hifn_init_pci_registers(struct hifn_softc *sc) |
1024 | { |
1025 | /* write fixed values needed by the Initialization registers */ |
1026 | WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); |
1027 | WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); |
1028 | WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); |
1029 | |
1030 | /* write all 4 ring address registers */ |
1031 | WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr + |
1032 | offsetof(struct hifn_dma, cmdr[0])); |
1033 | WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr + |
1034 | offsetof(struct hifn_dma, srcr[0])); |
1035 | WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr + |
1036 | offsetof(struct hifn_dma, dstr[0])); |
1037 | WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr + |
1038 | offsetof(struct hifn_dma, resr[0])); |
1039 | |
1040 | DELAY(2000); |
1041 | |
1042 | /* write status register */ |
1043 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, |
1044 | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | |
1045 | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | |
1046 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | |
1047 | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | |
1048 | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | |
1049 | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | |
1050 | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | |
1051 | HIFN_DMACSR_S_WAIT | |
1052 | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | |
1053 | HIFN_DMACSR_C_WAIT | |
1054 | HIFN_DMACSR_ENGINE | |
1055 | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? |
1056 | HIFN_DMACSR_PUBDONE : 0) | |
1057 | ((sc->sc_flags & HIFN_IS_7811) ? |
1058 | HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); |
1059 | |
1060 | sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; |
1061 | sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | |
1062 | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | |
1063 | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | |
1064 | HIFN_DMAIER_ENGINE | |
1065 | ((sc->sc_flags & HIFN_IS_7811) ? |
1066 | HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); |
1067 | sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; |
1068 | WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); |
1069 | CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2); |
1070 | |
1071 | if (sc->sc_flags & HIFN_IS_7956) { |
1072 | WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | |
1073 | HIFN_PUCNFG_TCALLPHASES | |
1074 | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); |
1075 | WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956); |
1076 | } else { |
1077 | WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | |
1078 | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | |
1079 | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | |
1080 | (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); |
1081 | } |
1082 | |
1083 | WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); |
1084 | WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | |
1085 | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | |
1086 | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | |
1087 | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); |
1088 | } |
1089 | |
1090 | /* |
1091 | * The maximum number of sessions supported by the card |
1092 | * is dependent on the amount of context ram, which |
1093 | * encryption algorithms are enabled, and how compression |
1094 | * is configured. This should be configured before this |
1095 | * routine is called. |
1096 | */ |
1097 | static void |
1098 | hifn_sessions(struct hifn_softc *sc) |
1099 | { |
1100 | u_int32_t pucnfg; |
1101 | int ctxsize; |
1102 | |
1103 | pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); |
1104 | |
1105 | if (pucnfg & HIFN_PUCNFG_COMPSING) { |
1106 | if (pucnfg & HIFN_PUCNFG_ENCCNFG) |
1107 | ctxsize = 128; |
1108 | else |
1109 | ctxsize = 512; |
1110 | /* |
1111 | * 7955/7956 has internal context memory of 32K |
1112 | */ |
1113 | if (sc->sc_flags & HIFN_IS_7956) |
1114 | sc->sc_maxses = 32768 / ctxsize; |
1115 | else |
1116 | sc->sc_maxses = 1 + |
1117 | ((sc->sc_ramsize - 32768) / ctxsize); |
1118 | } |
1119 | else |
1120 | sc->sc_maxses = sc->sc_ramsize / 16384; |
1121 | |
1122 | if (sc->sc_maxses > 2048) |
1123 | sc->sc_maxses = 2048; |
1124 | } |
1125 | |
1126 | /* |
1127 | * Determine ram type (sram or dram). Board should be just out of a reset |
1128 | * state when this is called. |
1129 | */ |
1130 | static int |
1131 | hifn_ramtype(struct hifn_softc *sc) |
1132 | { |
1133 | u_int8_t data[8], dataexpect[8]; |
1134 | int i; |
1135 | |
1136 | for (i = 0; i < sizeof(data); i++) |
1137 | data[i] = dataexpect[i] = 0x55; |
1138 | if (hifn_writeramaddr(sc, 0, data)) |
1139 | return (-1); |
1140 | if (hifn_readramaddr(sc, 0, data)) |
1141 | return (-1); |
1142 | if (memcmp(data, dataexpect, sizeof(data)) != 0) { |
1143 | sc->sc_drammodel = 1; |
1144 | return (0); |
1145 | } |
1146 | |
1147 | for (i = 0; i < sizeof(data); i++) |
1148 | data[i] = dataexpect[i] = 0xaa; |
1149 | if (hifn_writeramaddr(sc, 0, data)) |
1150 | return (-1); |
1151 | if (hifn_readramaddr(sc, 0, data)) |
1152 | return (-1); |
1153 | if (memcmp(data, dataexpect, sizeof(data)) != 0) { |
1154 | sc->sc_drammodel = 1; |
1155 | return (0); |
1156 | } |
1157 | |
1158 | return (0); |
1159 | } |
1160 | |
1161 | #define HIFN_SRAM_MAX (32 << 20) |
1162 | #define HIFN_SRAM_STEP_SIZE 16384 |
1163 | #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) |
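
/*
 * Probe the SRAM size in 16KB steps: write a distinct index pattern at
 * every step from the top of the 32MB window downwards, then read back
 * from the bottom up.  sc_ramsize ends up just past the last step that
 * read back correctly; aliased or absent memory fails the comparison
 * and stops the scan.
 */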
1164 | |
1165 | static int |
1166 | hifn_sramsize(struct hifn_softc *sc) |
1167 | { |
1168 | u_int32_t a; |
1169 | u_int8_t data[8]; |
1170 | u_int8_t dataexpect[sizeof(data)]; |
1171 | int32_t i; |
1172 | |
1173 | for (i = 0; i < sizeof(data); i++) |
1174 | data[i] = dataexpect[i] = i ^ 0x5a; |
1175 | |
1176 | for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { |
1177 | a = i * HIFN_SRAM_STEP_SIZE; |
1178 | memcpy(data, &i, sizeof(i)); |
1179 | hifn_writeramaddr(sc, a, data); |
1180 | } |
1181 | |
1182 | for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { |
1183 | a = i * HIFN_SRAM_STEP_SIZE; |
1184 | memcpy(dataexpect, &i, sizeof(i)); |
1185 | if (hifn_readramaddr(sc, a, data) < 0) |
1186 | return (0); |
1187 | if (memcmp(data, dataexpect, sizeof(data)) != 0) |
1188 | return (0); |
1189 | sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; |
1190 | } |
1191 | |
1192 | return (0); |
1193 | } |
1194 | |
1195 | /* |
1196 | * XXX For dram boards, one should really try all of the |
1197 | * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG |
1198 | * is already set up correctly. |
1199 | */ |
1200 | static int |
1201 | hifn_dramsize(struct hifn_softc *sc) |
1202 | { |
1203 | u_int32_t cnfg; |
1204 | |
1205 | if (sc->sc_flags & HIFN_IS_7956) { |
1206 | /* |
1207 | * 7955/7956 have a fixed internal ram of only 32K. |
1208 | */ |
1209 | sc->sc_ramsize = 32768; |
1210 | } else { |
1211 | cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & |
1212 | HIFN_PUCNFG_DRAMMASK; |
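		/*
		 * The DRAM size field starts at bit 13 of PUCNFG; a
		 * field value of n decodes to 1 << (n + 18) bytes,
		 * i.e. n = 0 -> 256KB, n = 1 -> 512KB, and so on.
		 */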
1213 | sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); |
1214 | } |
1215 | return (0); |
1216 | } |
1217 | |
1218 | static void |
1219 | hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, |
1220 | int *resp) |
1221 | { |
1222 | struct hifn_dma *dma = sc->sc_dma; |
1223 | |
1224 | if (dma->cmdi == HIFN_D_CMD_RSIZE) { |
1225 | dma->cmdi = 0; |
1226 | dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | |
1227 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1228 | HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, |
1229 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1230 | } |
1231 | *cmdp = dma->cmdi++; |
1232 | dma->cmdk = dma->cmdi; |
1233 | |
1234 | if (dma->srci == HIFN_D_SRC_RSIZE) { |
1235 | dma->srci = 0; |
1236 | dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | |
1237 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1238 | HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, |
1239 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1240 | } |
1241 | *srcp = dma->srci++; |
1242 | dma->srck = dma->srci; |
1243 | |
1244 | if (dma->dsti == HIFN_D_DST_RSIZE) { |
1245 | dma->dsti = 0; |
1246 | dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | |
1247 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1248 | HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, |
1249 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1250 | } |
1251 | *dstp = dma->dsti++; |
1252 | dma->dstk = dma->dsti; |
1253 | |
1254 | if (dma->resi == HIFN_D_RES_RSIZE) { |
1255 | dma->resi = 0; |
1256 | dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | |
1257 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1258 | HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, |
1259 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1260 | } |
1261 | *resp = dma->resi++; |
1262 | dma->resk = dma->resi; |
1263 | } |
1264 | |
1265 | static int |
1266 | hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) |
1267 | { |
1268 | struct hifn_dma *dma = sc->sc_dma; |
1269 | struct hifn_base_command wc; |
1270 | const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; |
1271 | int r, cmdi, resi, srci, dsti; |
1272 | |
1273 | wc.masks = htole16(3 << 13); |
1274 | wc.session_num = htole16(addr >> 14); |
1275 | wc.total_source_count = htole16(8); |
1276 | wc.total_dest_count = htole16(addr & 0x3fff); |
1277 | |
1278 | hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); |
1279 | |
1280 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, |
1281 | HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | |
1282 | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); |
1283 | |
1284 | /* build write command */ |
1285 | memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND); |
1286 | *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc; |
1287 | memcpy(&dma->test_src, data, sizeof(dma->test_src)); |
1288 | |
1289 | dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr |
1290 | + offsetof(struct hifn_dma, test_src)); |
1291 | dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr |
1292 | + offsetof(struct hifn_dma, test_dst)); |
1293 | |
1294 | dma->cmdr[cmdi].l = htole32(16 | masks); |
1295 | dma->srcr[srci].l = htole32(8 | masks); |
1296 | dma->dstr[dsti].l = htole32(4 | masks); |
1297 | dma->resr[resi].l = htole32(4 | masks); |
1298 | |
1299 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1300 | 0, sc->sc_dmamap->dm_mapsize, |
1301 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1302 | |
1303 | for (r = 10000; r >= 0; r--) { |
1304 | DELAY(10); |
1305 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1306 | 0, sc->sc_dmamap->dm_mapsize, |
1307 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1308 | if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) |
1309 | break; |
1310 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1311 | 0, sc->sc_dmamap->dm_mapsize, |
1312 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1313 | } |
1314 | if (r == 0) { |
		printf("%s: writeramaddr -- "
		    "result[%d](addr %d) still valid\n",
1317 | device_xname(sc->sc_dv), resi, addr); |
1318 | r = -1; |
1319 | return (-1); |
1320 | } else |
1321 | r = 0; |
1322 | |
1323 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, |
1324 | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | |
1325 | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); |
1326 | |
1327 | return (r); |
1328 | } |
1329 | |
1330 | static int |
1331 | hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) |
1332 | { |
1333 | struct hifn_dma *dma = sc->sc_dma; |
1334 | struct hifn_base_command rc; |
1335 | const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; |
1336 | int r, cmdi, srci, dsti, resi; |
1337 | |
1338 | rc.masks = htole16(2 << 13); |
1339 | rc.session_num = htole16(addr >> 14); |
1340 | rc.total_source_count = htole16(addr & 0x3fff); |
1341 | rc.total_dest_count = htole16(8); |
1342 | |
1343 | hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); |
1344 | |
1345 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, |
1346 | HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | |
1347 | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); |
1348 | |
1349 | memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND); |
1350 | *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc; |
1351 | |
1352 | dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1353 | offsetof(struct hifn_dma, test_src)); |
1354 | dma->test_src = 0; |
1355 | dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1356 | offsetof(struct hifn_dma, test_dst)); |
1357 | dma->test_dst = 0; |
1358 | dma->cmdr[cmdi].l = htole32(8 | masks); |
1359 | dma->srcr[srci].l = htole32(8 | masks); |
1360 | dma->dstr[dsti].l = htole32(8 | masks); |
1361 | dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); |
1362 | |
1363 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1364 | 0, sc->sc_dmamap->dm_mapsize, |
1365 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1366 | |
1367 | for (r = 10000; r >= 0; r--) { |
1368 | DELAY(10); |
1369 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1370 | 0, sc->sc_dmamap->dm_mapsize, |
1371 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1372 | if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) |
1373 | break; |
1374 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
1375 | 0, sc->sc_dmamap->dm_mapsize, |
1376 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1377 | } |
1378 | if (r == 0) { |
		printf("%s: readramaddr -- "
		    "result[%d](addr %d) still valid\n",
1381 | device_xname(sc->sc_dv), resi, addr); |
1382 | r = -1; |
1383 | } else { |
1384 | r = 0; |
1385 | memcpy(data, &dma->test_dst, sizeof(dma->test_dst)); |
1386 | } |
1387 | |
1388 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, |
1389 | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | |
1390 | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); |
1391 | |
1392 | return (r); |
1393 | } |
1394 | |
1395 | /* |
1396 | * Initialize the descriptor rings. |
1397 | */ |
1398 | static void |
1399 | hifn_init_dma(struct hifn_softc *sc) |
1400 | { |
1401 | struct hifn_dma *dma = sc->sc_dma; |
1402 | int i; |
1403 | |
1404 | hifn_set_retry(sc); |
1405 | |
1406 | /* initialize static pointer values */ |
1407 | for (i = 0; i < HIFN_D_CMD_RSIZE; i++) |
1408 | dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1409 | offsetof(struct hifn_dma, command_bufs[i][0])); |
1410 | for (i = 0; i < HIFN_D_RES_RSIZE; i++) |
1411 | dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1412 | offsetof(struct hifn_dma, result_bufs[i][0])); |
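
	/*
	 * The extra descriptor at the end of each ring is a permanent
	 * jump back to the start of that ring; only its pointer is set
	 * up here, the VALID|JUMP bits are rewritten whenever a ring
	 * wraps (see hifn_alloc_slot() and the dmamap load routines).
	 */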
1413 | |
1414 | dma->cmdr[HIFN_D_CMD_RSIZE].p = |
1415 | htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1416 | offsetof(struct hifn_dma, cmdr[0])); |
1417 | dma->srcr[HIFN_D_SRC_RSIZE].p = |
1418 | htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1419 | offsetof(struct hifn_dma, srcr[0])); |
1420 | dma->dstr[HIFN_D_DST_RSIZE].p = |
1421 | htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1422 | offsetof(struct hifn_dma, dstr[0])); |
1423 | dma->resr[HIFN_D_RES_RSIZE].p = |
1424 | htole32(sc->sc_dmamap->dm_segs[0].ds_addr + |
1425 | offsetof(struct hifn_dma, resr[0])); |
1426 | |
1427 | dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; |
1428 | dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; |
1429 | dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; |
1430 | } |
1431 | |
1432 | /* |
1433 | * Writes out the raw command buffer space. Returns the |
1434 | * command buffer size. |
1435 | */ |
1436 | static u_int |
1437 | hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) |
1438 | { |
1439 | u_int8_t *buf_pos; |
1440 | struct hifn_base_command *base_cmd; |
1441 | struct hifn_mac_command *mac_cmd; |
1442 | struct hifn_crypt_command *cry_cmd; |
1443 | struct hifn_comp_command *comp_cmd; |
1444 | int using_mac, using_crypt, using_comp, len, ivlen; |
1445 | u_int32_t dlen, slen; |
1446 | |
1447 | buf_pos = buf; |
1448 | using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; |
1449 | using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; |
1450 | using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP; |
1451 | |
1452 | base_cmd = (struct hifn_base_command *)buf_pos; |
1453 | base_cmd->masks = htole16(cmd->base_masks); |
1454 | slen = cmd->src_map->dm_mapsize; |
1455 | if (cmd->sloplen) |
1456 | dlen = cmd->dst_map->dm_mapsize - cmd->sloplen + |
1457 | sizeof(u_int32_t); |
1458 | else |
1459 | dlen = cmd->dst_map->dm_mapsize; |
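	/*
	 * The base command's length fields are only 16 bits wide; the
	 * bits above bit 15 of the source and destination sizes are
	 * folded into spare bits of the session_num word below.
	 */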
1460 | base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); |
1461 | base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); |
1462 | dlen >>= 16; |
1463 | slen >>= 16; |
1464 | base_cmd->session_num = htole16(cmd->session_num | |
1465 | ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | |
1466 | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); |
1467 | buf_pos += sizeof(struct hifn_base_command); |
1468 | |
1469 | if (using_comp) { |
1470 | comp_cmd = (struct hifn_comp_command *)buf_pos; |
1471 | dlen = cmd->compcrd->crd_len; |
1472 | comp_cmd->source_count = htole16(dlen & 0xffff); |
1473 | dlen >>= 16; |
1474 | comp_cmd->masks = htole16(cmd->comp_masks | |
1475 | ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M)); |
1476 | comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip); |
1477 | comp_cmd->reserved = 0; |
1478 | buf_pos += sizeof(struct hifn_comp_command); |
1479 | } |
1480 | |
1481 | if (using_mac) { |
1482 | mac_cmd = (struct hifn_mac_command *)buf_pos; |
1483 | dlen = cmd->maccrd->crd_len; |
1484 | mac_cmd->source_count = htole16(dlen & 0xffff); |
1485 | dlen >>= 16; |
1486 | mac_cmd->masks = htole16(cmd->mac_masks | |
1487 | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); |
1488 | mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); |
1489 | mac_cmd->reserved = 0; |
1490 | buf_pos += sizeof(struct hifn_mac_command); |
1491 | } |
1492 | |
1493 | if (using_crypt) { |
1494 | cry_cmd = (struct hifn_crypt_command *)buf_pos; |
1495 | dlen = cmd->enccrd->crd_len; |
1496 | cry_cmd->source_count = htole16(dlen & 0xffff); |
1497 | dlen >>= 16; |
1498 | cry_cmd->masks = htole16(cmd->cry_masks | |
1499 | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); |
1500 | cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); |
1501 | cry_cmd->reserved = 0; |
1502 | buf_pos += sizeof(struct hifn_crypt_command); |
1503 | } |
1504 | |
1505 | if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { |
1506 | memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH); |
1507 | buf_pos += HIFN_MAC_KEY_LENGTH; |
1508 | } |
1509 | |
1510 | if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { |
1511 | switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { |
1512 | case HIFN_CRYPT_CMD_ALG_3DES: |
1513 | memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH); |
1514 | buf_pos += HIFN_3DES_KEY_LENGTH; |
1515 | break; |
1516 | case HIFN_CRYPT_CMD_ALG_DES: |
1517 | memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH); |
1518 | buf_pos += HIFN_DES_KEY_LENGTH; |
1519 | break; |
1520 | case HIFN_CRYPT_CMD_ALG_RC4: |
1521 | len = 256; |
1522 | do { |
1523 | int clen; |
1524 | |
1525 | clen = MIN(cmd->cklen, len); |
1526 | memcpy(buf_pos, cmd->ck, clen); |
1527 | len -= clen; |
1528 | buf_pos += clen; |
1529 | } while (len > 0); |
1530 | memset(buf_pos, 0, 4); |
1531 | buf_pos += 4; |
1532 | break; |
1533 | case HIFN_CRYPT_CMD_ALG_AES: |
1534 | /* |
1535 | * AES keys are variable 128, 192 and |
1536 | * 256 bits (16, 24 and 32 bytes). |
1537 | */ |
1538 | memcpy(buf_pos, cmd->ck, cmd->cklen); |
1539 | buf_pos += cmd->cklen; |
1540 | break; |
1541 | } |
1542 | } |
1543 | |
1544 | if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { |
1545 | switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { |
1546 | case HIFN_CRYPT_CMD_ALG_AES: |
1547 | ivlen = HIFN_AES_IV_LENGTH; |
1548 | break; |
1549 | default: |
1550 | ivlen = HIFN_IV_LENGTH; |
1551 | break; |
1552 | } |
1553 | memcpy(buf_pos, cmd->iv, ivlen); |
1554 | buf_pos += ivlen; |
1555 | } |
1556 | |
1557 | if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT | |
1558 | HIFN_BASE_CMD_COMP)) == 0) { |
1559 | memset(buf_pos, 0, 8); |
1560 | buf_pos += 8; |
1561 | } |
1562 | |
1563 | return (buf_pos - buf); |
1564 | } |
1565 | |
1566 | static int |
1567 | hifn_dmamap_aligned(bus_dmamap_t map) |
1568 | { |
1569 | int i; |
1570 | |
1571 | for (i = 0; i < map->dm_nsegs; i++) { |
1572 | if (map->dm_segs[i].ds_addr & 3) |
1573 | return (0); |
1574 | if ((i != (map->dm_nsegs - 1)) && |
1575 | (map->dm_segs[i].ds_len & 3)) |
1576 | return (0); |
1577 | } |
1578 | return (1); |
1579 | } |
1580 | |
1581 | static int |
1582 | hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) |
1583 | { |
1584 | struct hifn_dma *dma = sc->sc_dma; |
1585 | bus_dmamap_t map = cmd->dst_map; |
1586 | u_int32_t p, l; |
1587 | int idx, used = 0, i; |
1588 | |
1589 | idx = dma->dsti; |
1590 | for (i = 0; i < map->dm_nsegs - 1; i++) { |
1591 | dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr); |
1592 | dma->dstr[idx].l = htole32(HIFN_D_VALID | |
1593 | HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len); |
1594 | HIFN_DSTR_SYNC(sc, idx, |
1595 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1596 | used++; |
1597 | |
1598 | if (++idx == HIFN_D_DST_RSIZE) { |
1599 | dma->dstr[idx].l = htole32(HIFN_D_VALID | |
1600 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1601 | HIFN_DSTR_SYNC(sc, idx, |
1602 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1603 | idx = 0; |
1604 | } |
1605 | } |
1606 | |
1607 | if (cmd->sloplen == 0) { |
1608 | p = map->dm_segs[i].ds_addr; |
1609 | l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | |
1610 | map->dm_segs[i].ds_len; |
1611 | } else { |
1612 | p = sc->sc_dmamap->dm_segs[0].ds_addr + |
1613 | offsetof(struct hifn_dma, slop[cmd->slopidx]); |
1614 | l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | |
1615 | sizeof(u_int32_t); |
1616 | |
1617 | if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) { |
1618 | dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr); |
1619 | dma->dstr[idx].l = htole32(HIFN_D_VALID | |
1620 | HIFN_D_MASKDONEIRQ | |
1621 | (map->dm_segs[i].ds_len - cmd->sloplen)); |
1622 | HIFN_DSTR_SYNC(sc, idx, |
1623 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1624 | used++; |
1625 | |
1626 | if (++idx == HIFN_D_DST_RSIZE) { |
1627 | dma->dstr[idx].l = htole32(HIFN_D_VALID | |
1628 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1629 | HIFN_DSTR_SYNC(sc, idx, |
1630 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1631 | idx = 0; |
1632 | } |
1633 | } |
1634 | } |
1635 | dma->dstr[idx].p = htole32(p); |
1636 | dma->dstr[idx].l = htole32(l); |
1637 | HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1638 | used++; |
1639 | |
1640 | if (++idx == HIFN_D_DST_RSIZE) { |
1641 | dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | |
1642 | HIFN_D_MASKDONEIRQ); |
1643 | HIFN_DSTR_SYNC(sc, idx, |
1644 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1645 | idx = 0; |
1646 | } |
1647 | |
1648 | dma->dsti = idx; |
1649 | dma->dstu += used; |
1650 | return (idx); |
1651 | } |
1652 | |
1653 | static int |
1654 | hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) |
1655 | { |
1656 | struct hifn_dma *dma = sc->sc_dma; |
1657 | bus_dmamap_t map = cmd->src_map; |
1658 | int idx, i; |
1659 | u_int32_t last = 0; |
1660 | |
1661 | idx = dma->srci; |
1662 | for (i = 0; i < map->dm_nsegs; i++) { |
1663 | if (i == map->dm_nsegs - 1) |
1664 | last = HIFN_D_LAST; |
1665 | |
1666 | dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr); |
1667 | dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len | |
1668 | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); |
1669 | HIFN_SRCR_SYNC(sc, idx, |
1670 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1671 | |
1672 | if (++idx == HIFN_D_SRC_RSIZE) { |
1673 | dma->srcr[idx].l = htole32(HIFN_D_VALID | |
1674 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1675 | HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, |
1676 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1677 | idx = 0; |
1678 | } |
1679 | } |
1680 | dma->srci = idx; |
1681 | dma->srcu += map->dm_nsegs; |
1682 | return (idx); |
1683 | } |
1684 | |
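/*
 * Queue a crypto request on the DMA rings.  The source map is created
 * and loaded from the mbuf chain or uio; if the source is suitably
 * aligned it doubles as the destination, otherwise a fresh mbuf chain
 * is allocated for the output.  After checking for ring space, the
 * command descriptor is written and the source, result and destination
 * descriptors are queued, enabling any idle DMA engines along the way.
 * Returns 0 on success or an error (ENOMEM, EINVAL) on failure.
 */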
1685 | static int |
1686 | hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd, |
1687 | struct cryptop *crp, int hint) |
1688 | { |
1689 | struct hifn_dma *dma = sc->sc_dma; |
1690 | u_int32_t cmdlen; |
1691 | int cmdi, resi, err = 0; |
1692 | |
1693 | if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, |
1694 | HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) |
1695 | return (ENOMEM); |
1696 | |
1697 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1698 | if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, |
1699 | cmd->srcu.src_m, BUS_DMA_NOWAIT)) { |
1700 | err = ENOMEM; |
1701 | goto err_srcmap1; |
1702 | } |
1703 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
1704 | if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, |
1705 | cmd->srcu.src_io, BUS_DMA_NOWAIT)) { |
1706 | err = ENOMEM; |
1707 | goto err_srcmap1; |
1708 | } |
1709 | } else { |
1710 | err = EINVAL; |
1711 | goto err_srcmap1; |
1712 | } |
1713 | |
1714 | if (hifn_dmamap_aligned(cmd->src_map)) { |
1715 | cmd->sloplen = cmd->src_map->dm_mapsize & 3; |
1716 | if (crp->crp_flags & CRYPTO_F_IOV) |
1717 | cmd->dstu.dst_io = cmd->srcu.src_io; |
1718 | else if (crp->crp_flags & CRYPTO_F_IMBUF) |
1719 | cmd->dstu.dst_m = cmd->srcu.src_m; |
1720 | cmd->dst_map = cmd->src_map; |
1721 | } else { |
1722 | if (crp->crp_flags & CRYPTO_F_IOV) { |
1723 | err = EINVAL; |
1724 | goto err_srcmap; |
1725 | } else if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1726 | int totlen, len; |
1727 | struct mbuf *m, *m0, *mlast; |
1728 | |
1729 | totlen = cmd->src_map->dm_mapsize; |
1730 | if (cmd->srcu.src_m->m_flags & M_PKTHDR) { |
1731 | len = MHLEN; |
1732 | MGETHDR(m0, M_DONTWAIT, MT_DATA); |
1733 | } else { |
1734 | len = MLEN; |
1735 | MGET(m0, M_DONTWAIT, MT_DATA); |
1736 | } |
1737 | if (m0 == NULL) { |
1738 | err = ENOMEM; |
1739 | goto err_srcmap; |
1740 | } |
1741 | if (len == MHLEN) |
1742 | M_DUP_PKTHDR(m0, cmd->srcu.src_m); |
1743 | if (totlen >= MINCLSIZE) { |
1744 | MCLGET(m0, M_DONTWAIT); |
1745 | if (m0->m_flags & M_EXT) |
1746 | len = MCLBYTES; |
1747 | } |
1748 | totlen -= len; |
1749 | m0->m_pkthdr.len = m0->m_len = len; |
1750 | mlast = m0; |
1751 | |
1752 | while (totlen > 0) { |
1753 | MGET(m, M_DONTWAIT, MT_DATA); |
1754 | if (m == NULL) { |
1755 | err = ENOMEM; |
1756 | m_freem(m0); |
1757 | goto err_srcmap; |
1758 | } |
1759 | len = MLEN; |
1760 | if (totlen >= MINCLSIZE) { |
1761 | MCLGET(m, M_DONTWAIT); |
1762 | if (m->m_flags & M_EXT) |
1763 | len = MCLBYTES; |
1764 | } |
1765 | |
1766 | m->m_len = len; |
1767 | if (m0->m_flags & M_PKTHDR) |
1768 | m0->m_pkthdr.len += len; |
1769 | totlen -= len; |
1770 | |
1771 | mlast->m_next = m; |
1772 | mlast = m; |
1773 | } |
1774 | cmd->dstu.dst_m = m0; |
1775 | } |
1776 | } |
1777 | |
1778 | if (cmd->dst_map == NULL) { |
1779 | if (bus_dmamap_create(sc->sc_dmat, |
1780 | HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER, |
1781 | HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { |
1782 | err = ENOMEM; |
1783 | goto err_srcmap; |
1784 | } |
1785 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1786 | if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, |
1787 | cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { |
1788 | err = ENOMEM; |
1789 | goto err_dstmap1; |
1790 | } |
1791 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
1792 | if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, |
1793 | cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { |
1794 | err = ENOMEM; |
1795 | goto err_dstmap1; |
1796 | } |
1797 | } |
1798 | } |
1799 | |
1800 | #ifdef HIFN_DEBUG |
1801 | if (hifn_debug) |
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
1808 | #endif |
1809 | |
1810 | if (cmd->src_map == cmd->dst_map) |
1811 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
1812 | 0, cmd->src_map->dm_mapsize, |
1813 | BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); |
1814 | else { |
1815 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
1816 | 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
1817 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
1818 | 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
1819 | } |
1820 | |
1821 | /* |
1822 | * need 1 cmd, and 1 res |
1823 | * need N src, and N dst |
1824 | */ |
1825 | if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || |
1826 | (dma->resu + 1) > HIFN_D_RES_RSIZE) { |
1827 | err = ENOMEM; |
1828 | goto err_dstmap; |
1829 | } |
1830 | if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || |
1831 | (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) { |
1832 | err = ENOMEM; |
1833 | goto err_dstmap; |
1834 | } |
1835 | |
1836 | if (dma->cmdi == HIFN_D_CMD_RSIZE) { |
1837 | dma->cmdi = 0; |
1838 | dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | |
1839 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1840 | HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, |
1841 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1842 | } |
1843 | cmdi = dma->cmdi++; |
1844 | cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); |
1845 | HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); |
1846 | |
1847 | /* .p for command/result already set */ |
1848 | dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | |
1849 | HIFN_D_MASKDONEIRQ); |
1850 | HIFN_CMDR_SYNC(sc, cmdi, |
1851 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
1852 | dma->cmdu++; |
1853 | if (sc->sc_c_busy == 0) { |
1854 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); |
1855 | sc->sc_c_busy = 1; |
1856 | SET_LED(sc, HIFN_MIPSRST_LED0); |
1857 | } |
1858 | |
	/*
	 * In principle we only need the "command wait" interrupt when
	 * there is more than one command in the queue, since it salvages
	 * us from a missed completion interrupt.
	 *
	 * XXX We do seem to miss some interrupts, so we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 */
1868 | #if 0 |
1869 | if (dma->cmdu > 1) { |
1870 | #endif |
1871 | sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; |
1872 | WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); |
1873 | #if 0 |
1874 | } |
1875 | #endif |
1876 | |
1877 | hifnstats.hst_ipackets++; |
1878 | hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; |
1879 | |
1880 | hifn_dmamap_load_src(sc, cmd); |
1881 | if (sc->sc_s_busy == 0) { |
1882 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); |
1883 | sc->sc_s_busy = 1; |
1884 | SET_LED(sc, HIFN_MIPSRST_LED1); |
1885 | } |
1886 | |
1887 | /* |
1888 | * Unlike other descriptors, we don't mask done interrupt from |
1889 | * result descriptor. |
1890 | */ |
1891 | #ifdef HIFN_DEBUG |
1892 | if (hifn_debug) |
		printf("load res\n");
1894 | #endif |
1895 | if (dma->resi == HIFN_D_RES_RSIZE) { |
1896 | dma->resi = 0; |
1897 | dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | |
1898 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
1899 | HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, |
1900 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1901 | } |
1902 | resi = dma->resi++; |
1903 | dma->hifn_commands[resi] = cmd; |
1904 | HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); |
1905 | dma->resr[resi].l = htole32(HIFN_MAX_RESULT | |
1906 | HIFN_D_VALID | HIFN_D_LAST); |
1907 | HIFN_RESR_SYNC(sc, resi, |
1908 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1909 | dma->resu++; |
1910 | if (sc->sc_r_busy == 0) { |
1911 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); |
1912 | sc->sc_r_busy = 1; |
1913 | SET_LED(sc, HIFN_MIPSRST_LED2); |
1914 | } |
1915 | |
1916 | if (cmd->sloplen) |
1917 | cmd->slopidx = resi; |
1918 | |
1919 | hifn_dmamap_load_dst(sc, cmd); |
1920 | |
1921 | if (sc->sc_d_busy == 0) { |
1922 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); |
1923 | sc->sc_d_busy = 1; |
1924 | } |
1925 | |
1926 | #ifdef HIFN_DEBUG |
1927 | if (hifn_debug) |
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
1931 | #endif |
1932 | |
1933 | sc->sc_active = 5; |
1934 | return (err); /* success */ |
1935 | |
1936 | err_dstmap: |
1937 | if (cmd->src_map != cmd->dst_map) |
1938 | bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); |
1939 | err_dstmap1: |
1940 | if (cmd->src_map != cmd->dst_map) |
1941 | bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); |
1942 | err_srcmap: |
1943 | if (crp->crp_flags & CRYPTO_F_IMBUF && |
1944 | cmd->srcu.src_m != cmd->dstu.dst_m) |
1945 | m_freem(cmd->dstu.dst_m); |
1946 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
1947 | err_srcmap1: |
1948 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
1949 | return (err); |
1950 | } |
1951 | |
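/*
 * Once-a-second callout.  If no request has been queued for about five
 * seconds (sc_active has counted down to zero), disable any DMA engines
 * whose rings have drained and clear the corresponding LEDs; otherwise
 * just decrement the idle counter.  Reschedules itself every hz ticks.
 */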
1952 | static void |
1953 | hifn_tick(void *vsc) |
1954 | { |
1955 | struct hifn_softc *sc = vsc; |
1956 | |
1957 | mutex_spin_enter(&sc->sc_mtx); |
1958 | if (sc->sc_active == 0) { |
1959 | struct hifn_dma *dma = sc->sc_dma; |
1960 | u_int32_t r = 0; |
1961 | |
1962 | if (dma->cmdu == 0 && sc->sc_c_busy) { |
1963 | sc->sc_c_busy = 0; |
1964 | r |= HIFN_DMACSR_C_CTRL_DIS; |
1965 | CLR_LED(sc, HIFN_MIPSRST_LED0); |
1966 | } |
1967 | if (dma->srcu == 0 && sc->sc_s_busy) { |
1968 | sc->sc_s_busy = 0; |
1969 | r |= HIFN_DMACSR_S_CTRL_DIS; |
1970 | CLR_LED(sc, HIFN_MIPSRST_LED1); |
1971 | } |
1972 | if (dma->dstu == 0 && sc->sc_d_busy) { |
1973 | sc->sc_d_busy = 0; |
1974 | r |= HIFN_DMACSR_D_CTRL_DIS; |
1975 | } |
1976 | if (dma->resu == 0 && sc->sc_r_busy) { |
1977 | sc->sc_r_busy = 0; |
1978 | r |= HIFN_DMACSR_R_CTRL_DIS; |
1979 | CLR_LED(sc, HIFN_MIPSRST_LED2); |
1980 | } |
1981 | if (r) |
1982 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); |
	} else
1985 | sc->sc_active--; |
1986 | #ifdef __OpenBSD__ |
1987 | timeout_add(&sc->sc_tickto, hz); |
1988 | #else |
1989 | callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); |
1990 | #endif |
1991 | mutex_spin_exit(&sc->sc_mtx); |
1992 | } |
1993 | |
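/*
 * Interrupt handler.  Acknowledge the DMA status bits we enabled,
 * reset the board on an abort, drop the "command wait" interrupt when
 * the result ring is empty, and then reap completed result, source and
 * command descriptors, calling hifn_callback() for each finished
 * command.
 */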
1994 | static int |
1995 | hifn_intr(void *arg) |
1996 | { |
1997 | struct hifn_softc *sc = arg; |
1998 | struct hifn_dma *dma = sc->sc_dma; |
1999 | u_int32_t dmacsr, restart; |
2000 | int i, u; |
2001 | |
2002 | dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); |
2003 | |
2004 | #ifdef HIFN_DEBUG |
2005 | if (hifn_debug) |
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    device_xname(sc->sc_dv),
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2010 | #endif |
2011 | |
2012 | mutex_spin_enter(&sc->sc_mtx); |
2013 | |
2014 | /* Nothing in the DMA unit interrupted */ |
2015 | if ((dmacsr & sc->sc_dmaier) == 0) { |
2016 | mutex_spin_exit(&sc->sc_mtx); |
2017 | return (0); |
2018 | } |
2019 | |
2020 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); |
2021 | |
2022 | if (dmacsr & HIFN_DMACSR_ENGINE) |
2023 | WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR)); |
2024 | |
2025 | if ((sc->sc_flags & HIFN_HAS_PUBLIC) && |
2026 | (dmacsr & HIFN_DMACSR_PUBDONE)) |
2027 | WRITE_REG_1(sc, HIFN_1_PUB_STATUS, |
2028 | READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); |
2029 | |
2030 | restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER); |
2031 | if (restart) |
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);
2033 | |
2034 | if (sc->sc_flags & HIFN_IS_7811) { |
2035 | if (dmacsr & HIFN_DMACSR_ILLR) |
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
2037 | if (dmacsr & HIFN_DMACSR_ILLW) |
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
2039 | } |
2040 | |
2041 | restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | |
2042 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); |
2043 | if (restart) { |
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
2045 | hifnstats.hst_abort++; |
2046 | hifn_abort(sc); |
2047 | goto out; |
2048 | } |
2049 | |
2050 | if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) { |
		/*
		 * If there are no result slots outstanding and we receive
		 * a "waiting on command" interrupt, disable that interrupt
		 * by clearing its enable bit.
		 */
2056 | sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; |
2057 | WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); |
2058 | } |
2059 | |
2060 | /* clear the rings */ |
2061 | i = dma->resk; |
2062 | while (dma->resu != 0) { |
2063 | HIFN_RESR_SYNC(sc, i, |
2064 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2065 | if (dma->resr[i].l & htole32(HIFN_D_VALID)) { |
2066 | HIFN_RESR_SYNC(sc, i, |
2067 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2068 | break; |
2069 | } |
2070 | |
2071 | if (i != HIFN_D_RES_RSIZE) { |
2072 | struct hifn_command *cmd; |
2073 | |
2074 | HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); |
2075 | cmd = dma->hifn_commands[i]; |
2076 | KASSERT(cmd != NULL |
2077 | /*("hifn_intr: null command slot %u", i)*/); |
2078 | dma->hifn_commands[i] = NULL; |
2079 | |
2080 | hifn_callback(sc, cmd, dma->result_bufs[i]); |
2081 | hifnstats.hst_opackets++; |
2082 | } |
2083 | |
2084 | if (++i == (HIFN_D_RES_RSIZE + 1)) |
2085 | i = 0; |
2086 | else |
2087 | dma->resu--; |
2088 | } |
2089 | dma->resk = i; |
2090 | |
2091 | i = dma->srck; u = dma->srcu; |
2092 | while (u != 0) { |
2093 | HIFN_SRCR_SYNC(sc, i, |
2094 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2095 | if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { |
2096 | HIFN_SRCR_SYNC(sc, i, |
2097 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2098 | break; |
2099 | } |
2100 | if (++i == (HIFN_D_SRC_RSIZE + 1)) |
2101 | i = 0; |
2102 | else |
2103 | u--; |
2104 | } |
2105 | dma->srck = i; dma->srcu = u; |
2106 | |
2107 | i = dma->cmdk; u = dma->cmdu; |
2108 | while (u != 0) { |
2109 | HIFN_CMDR_SYNC(sc, i, |
2110 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2111 | if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { |
2112 | HIFN_CMDR_SYNC(sc, i, |
2113 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2114 | break; |
2115 | } |
2116 | if (i != HIFN_D_CMD_RSIZE) { |
2117 | u--; |
2118 | HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); |
2119 | } |
2120 | if (++i == (HIFN_D_CMD_RSIZE + 1)) |
2121 | i = 0; |
2122 | } |
2123 | dma->cmdk = i; dma->cmdu = u; |
2124 | |
2125 | out: |
2126 | mutex_spin_exit(&sc->sc_mtx); |
2127 | return (1); |
2128 | } |
2129 | |
/*
 * Allocate a new 'session' and return an encoded session id.  On entry
 * 'sidp' contains our registration id; on successful allocation it is
 * filled in with an encoded session id.
 */
2135 | static int |
2136 | hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) |
2137 | { |
2138 | struct cryptoini *c; |
2139 | struct hifn_softc *sc = arg; |
2140 | int i, mac = 0, cry = 0, comp = 0, retval = EINVAL; |
2141 | |
2142 | KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/); |
2143 | if (sidp == NULL || cri == NULL || sc == NULL) |
2144 | return retval; |
2145 | |
2146 | mutex_spin_enter(&sc->sc_mtx); |
2147 | |
2148 | for (i = 0; i < sc->sc_maxses; i++) |
2149 | if (sc->sc_sessions[i].hs_state == HS_STATE_FREE) |
2150 | break; |
2151 | if (i == sc->sc_maxses) { |
2152 | retval = ENOMEM; |
2153 | goto out; |
2154 | } |
2155 | |
2156 | for (c = cri; c != NULL; c = c->cri_next) { |
2157 | switch (c->cri_alg) { |
2158 | case CRYPTO_MD5: |
2159 | case CRYPTO_SHA1: |
2160 | case CRYPTO_MD5_HMAC_96: |
2161 | case CRYPTO_SHA1_HMAC_96: |
2162 | if (mac) { |
2163 | goto out; |
2164 | } |
2165 | mac = 1; |
2166 | break; |
2167 | case CRYPTO_DES_CBC: |
2168 | case CRYPTO_3DES_CBC: |
2169 | case CRYPTO_AES_CBC: |
			/*
			 * Note that this is an initialization vector,
			 * not a cipher key; any function giving
			 * sufficient Hamming distance between outputs
			 * is fine.  Use of RC4 to generate IVs has
			 * been FIPS 140-2 certified by several labs.
			 */
2176 | #ifdef __NetBSD__ |
2177 | cprng_fast(sc->sc_sessions[i].hs_iv, |
2178 | c->cri_alg == CRYPTO_AES_CBC ? |
2179 | HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); |
2180 | #else /* FreeBSD and OpenBSD have get_random_bytes */ |
2181 | /* XXX this may read fewer, does it matter? */ |
2182 | get_random_bytes(sc->sc_sessions[i].hs_iv, |
2183 | c->cri_alg == CRYPTO_AES_CBC ? |
2184 | HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); |
2185 | #endif |
2186 | /*FALLTHROUGH*/ |
2187 | case CRYPTO_ARC4: |
2188 | if (cry) { |
2189 | goto out; |
2190 | } |
2191 | cry = 1; |
2192 | break; |
2193 | #ifdef HAVE_CRYPTO_LZS |
2194 | case CRYPTO_LZS_COMP: |
2195 | if (comp) { |
2196 | goto out; |
2197 | } |
2198 | comp = 1; |
2199 | break; |
2200 | #endif |
2201 | default: |
2202 | goto out; |
2203 | } |
2204 | } |
2205 | if (mac == 0 && cry == 0 && comp == 0) { |
2206 | goto out; |
2207 | } |
2208 | |
	/*
	 * XXX For now we only support compression without chaining to
	 * XXX the MAC/crypt engine.
	 */
2213 | if ((comp && mac) || (comp && cry)) { |
2214 | goto out; |
2215 | } |
2216 | |
2217 | *sidp = HIFN_SID(device_unit(sc->sc_dv), i); |
2218 | sc->sc_sessions[i].hs_state = HS_STATE_USED; |
2219 | |
2220 | retval = 0; |
2221 | out: |
2222 | mutex_spin_exit(&sc->sc_mtx); |
2223 | return retval; |
2224 | } |
2225 | |
/*
 * Deallocate a session.
 * XXX this routine should run a zeroed mac/encrypt key into context RAM
 * XXX to blow away any keys already stored there.
 */
2231 | static int |
2232 | hifn_freesession(void *arg, u_int64_t tid) |
2233 | { |
2234 | struct hifn_softc *sc = arg; |
2235 | int session; |
2236 | u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; |
2237 | |
2238 | KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/); |
2239 | if (sc == NULL) |
2240 | return (EINVAL); |
2241 | |
2242 | mutex_spin_enter(&sc->sc_mtx); |
2243 | session = HIFN_SESSION(sid); |
2244 | if (session >= sc->sc_maxses) { |
2245 | mutex_spin_exit(&sc->sc_mtx); |
2246 | return (EINVAL); |
2247 | } |
2248 | |
2249 | memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session])); |
2250 | mutex_spin_exit(&sc->sc_mtx); |
2251 | return (0); |
2252 | } |
2253 | |
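/*
 * Process a symmetric crypto request.  The request is validated, a
 * hifn_command describing the cipher and/or MAC operations (including
 * IV and key handling) is built, and the command is handed to
 * hifn_crypto().  Invalid or failed requests are completed immediately
 * with crp_etype set, except for ERESTART, which is returned to the
 * caller for requeueing.
 */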
2254 | static int |
2255 | hifn_process(void *arg, struct cryptop *crp, int hint) |
2256 | { |
2257 | struct hifn_softc *sc = arg; |
2258 | struct hifn_command *cmd = NULL; |
2259 | int session, err, ivlen; |
2260 | struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; |
2261 | |
2262 | if (crp == NULL || crp->crp_callback == NULL) { |
2263 | hifnstats.hst_invalid++; |
2264 | return (EINVAL); |
2265 | } |
2266 | |
2267 | mutex_spin_enter(&sc->sc_mtx); |
2268 | session = HIFN_SESSION(crp->crp_sid); |
2269 | |
2270 | if (sc == NULL || session >= sc->sc_maxses) { |
2271 | err = EINVAL; |
2272 | goto errout; |
2273 | } |
2274 | |
2275 | cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command), |
2276 | M_DEVBUF, M_NOWAIT|M_ZERO); |
2277 | if (cmd == NULL) { |
2278 | hifnstats.hst_nomem++; |
2279 | err = ENOMEM; |
2280 | goto errout; |
2281 | } |
2282 | |
2283 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
2284 | cmd->srcu.src_m = (struct mbuf *)crp->crp_buf; |
2285 | cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf; |
2286 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
2287 | cmd->srcu.src_io = (struct uio *)crp->crp_buf; |
2288 | cmd->dstu.dst_io = (struct uio *)crp->crp_buf; |
2289 | } else { |
2290 | err = EINVAL; |
2291 | goto errout; /* XXX we don't handle contiguous buffers! */ |
2292 | } |
2293 | |
2294 | crd1 = crp->crp_desc; |
2295 | if (crd1 == NULL) { |
2296 | err = EINVAL; |
2297 | goto errout; |
2298 | } |
2299 | crd2 = crd1->crd_next; |
2300 | |
2301 | if (crd2 == NULL) { |
2302 | if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 || |
2303 | crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || |
2304 | crd1->crd_alg == CRYPTO_SHA1 || |
2305 | crd1->crd_alg == CRYPTO_MD5) { |
2306 | maccrd = crd1; |
2307 | enccrd = NULL; |
2308 | } else if (crd1->crd_alg == CRYPTO_DES_CBC || |
2309 | crd1->crd_alg == CRYPTO_3DES_CBC || |
2310 | crd1->crd_alg == CRYPTO_AES_CBC || |
2311 | crd1->crd_alg == CRYPTO_ARC4) { |
2312 | if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) |
2313 | cmd->base_masks |= HIFN_BASE_CMD_DECODE; |
2314 | maccrd = NULL; |
2315 | enccrd = crd1; |
2316 | #ifdef HAVE_CRYPTO_LZS |
2317 | } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { |
2318 | return (hifn_compression(sc, crp, cmd)); |
2319 | #endif |
2320 | } else { |
2321 | err = EINVAL; |
2322 | goto errout; |
2323 | } |
2324 | } else { |
2325 | if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 || |
2326 | crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || |
2327 | crd1->crd_alg == CRYPTO_MD5 || |
2328 | crd1->crd_alg == CRYPTO_SHA1) && |
2329 | (crd2->crd_alg == CRYPTO_DES_CBC || |
2330 | crd2->crd_alg == CRYPTO_3DES_CBC || |
2331 | crd2->crd_alg == CRYPTO_AES_CBC || |
2332 | crd2->crd_alg == CRYPTO_ARC4) && |
2333 | ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { |
2334 | cmd->base_masks = HIFN_BASE_CMD_DECODE; |
2335 | maccrd = crd1; |
2336 | enccrd = crd2; |
2337 | } else if ((crd1->crd_alg == CRYPTO_DES_CBC || |
2338 | crd1->crd_alg == CRYPTO_ARC4 || |
2339 | crd1->crd_alg == CRYPTO_3DES_CBC || |
2340 | crd1->crd_alg == CRYPTO_AES_CBC) && |
2341 | (crd2->crd_alg == CRYPTO_MD5_HMAC_96 || |
2342 | crd2->crd_alg == CRYPTO_SHA1_HMAC_96 || |
2343 | crd2->crd_alg == CRYPTO_MD5 || |
2344 | crd2->crd_alg == CRYPTO_SHA1) && |
2345 | (crd1->crd_flags & CRD_F_ENCRYPT)) { |
2346 | enccrd = crd1; |
2347 | maccrd = crd2; |
2348 | } else { |
			/*
			 * We cannot order the operations on the 7751
			 * as requested.
			 */
2352 | err = EINVAL; |
2353 | goto errout; |
2354 | } |
2355 | } |
2356 | |
2357 | if (enccrd) { |
2358 | cmd->enccrd = enccrd; |
2359 | cmd->base_masks |= HIFN_BASE_CMD_CRYPT; |
2360 | switch (enccrd->crd_alg) { |
2361 | case CRYPTO_ARC4: |
2362 | cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; |
2363 | if ((enccrd->crd_flags & CRD_F_ENCRYPT) |
2364 | != sc->sc_sessions[session].hs_prev_op) |
2365 | sc->sc_sessions[session].hs_state = |
2366 | HS_STATE_USED; |
2367 | break; |
2368 | case CRYPTO_DES_CBC: |
2369 | cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | |
2370 | HIFN_CRYPT_CMD_MODE_CBC | |
2371 | HIFN_CRYPT_CMD_NEW_IV; |
2372 | break; |
2373 | case CRYPTO_3DES_CBC: |
2374 | cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | |
2375 | HIFN_CRYPT_CMD_MODE_CBC | |
2376 | HIFN_CRYPT_CMD_NEW_IV; |
2377 | break; |
2378 | case CRYPTO_AES_CBC: |
2379 | cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | |
2380 | HIFN_CRYPT_CMD_MODE_CBC | |
2381 | HIFN_CRYPT_CMD_NEW_IV; |
2382 | break; |
2383 | default: |
2384 | err = EINVAL; |
2385 | goto errout; |
2386 | } |
2387 | if (enccrd->crd_alg != CRYPTO_ARC4) { |
2388 | ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? |
2389 | HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); |
2390 | if (enccrd->crd_flags & CRD_F_ENCRYPT) { |
2391 | if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) |
2392 | memcpy(cmd->iv, enccrd->crd_iv, ivlen); |
2393 | else |
2394 | bcopy(sc->sc_sessions[session].hs_iv, |
2395 | cmd->iv, ivlen); |
2396 | |
2397 | if ((enccrd->crd_flags & CRD_F_IV_PRESENT) |
2398 | == 0) { |
2399 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
2400 | m_copyback(cmd->srcu.src_m, |
2401 | enccrd->crd_inject, |
2402 | ivlen, cmd->iv); |
2403 | else if (crp->crp_flags & CRYPTO_F_IOV) |
2404 | cuio_copyback(cmd->srcu.src_io, |
2405 | enccrd->crd_inject, |
2406 | ivlen, cmd->iv); |
2407 | } |
2408 | } else { |
2409 | if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) |
2410 | memcpy(cmd->iv, enccrd->crd_iv, ivlen); |
2411 | else if (crp->crp_flags & CRYPTO_F_IMBUF) |
2412 | m_copydata(cmd->srcu.src_m, |
2413 | enccrd->crd_inject, ivlen, cmd->iv); |
2414 | else if (crp->crp_flags & CRYPTO_F_IOV) |
2415 | cuio_copydata(cmd->srcu.src_io, |
2416 | enccrd->crd_inject, ivlen, cmd->iv); |
2417 | } |
2418 | } |
2419 | |
2420 | cmd->ck = enccrd->crd_key; |
2421 | cmd->cklen = enccrd->crd_klen >> 3; |
2422 | |
2423 | /* |
2424 | * Need to specify the size for the AES key in the masks. |
2425 | */ |
2426 | if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == |
2427 | HIFN_CRYPT_CMD_ALG_AES) { |
2428 | switch (cmd->cklen) { |
2429 | case 16: |
2430 | cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; |
2431 | break; |
2432 | case 24: |
2433 | cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; |
2434 | break; |
2435 | case 32: |
2436 | cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; |
2437 | break; |
2438 | default: |
2439 | err = EINVAL; |
2440 | goto errout; |
2441 | } |
2442 | } |
2443 | |
2444 | if (sc->sc_sessions[session].hs_state == HS_STATE_USED) |
2445 | cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; |
2446 | } |
2447 | |
2448 | if (maccrd) { |
2449 | cmd->maccrd = maccrd; |
2450 | cmd->base_masks |= HIFN_BASE_CMD_MAC; |
2451 | |
2452 | switch (maccrd->crd_alg) { |
2453 | case CRYPTO_MD5: |
2454 | cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | |
2455 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | |
2456 | HIFN_MAC_CMD_POS_IPSEC; |
2457 | break; |
2458 | case CRYPTO_MD5_HMAC_96: |
2459 | cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | |
2460 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | |
2461 | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; |
2462 | break; |
2463 | case CRYPTO_SHA1: |
2464 | cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | |
2465 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | |
2466 | HIFN_MAC_CMD_POS_IPSEC; |
2467 | break; |
2468 | case CRYPTO_SHA1_HMAC_96: |
2469 | cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | |
2470 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | |
2471 | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; |
2472 | break; |
2473 | } |
2474 | |
2475 | if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 || |
2476 | maccrd->crd_alg == CRYPTO_MD5_HMAC_96) && |
2477 | sc->sc_sessions[session].hs_state == HS_STATE_USED) { |
2478 | cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; |
2479 | memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3); |
2480 | memset(cmd->mac + (maccrd->crd_klen >> 3), 0, |
2481 | HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); |
2482 | } |
2483 | } |
2484 | |
2485 | cmd->crp = crp; |
2486 | cmd->session_num = session; |
2487 | cmd->softc = sc; |
2488 | |
2489 | err = hifn_crypto(sc, cmd, crp, hint); |
2490 | if (err == 0) { |
2491 | if (enccrd) |
2492 | sc->sc_sessions[session].hs_prev_op = |
2493 | enccrd->crd_flags & CRD_F_ENCRYPT; |
2494 | if (sc->sc_sessions[session].hs_state == HS_STATE_USED) |
2495 | sc->sc_sessions[session].hs_state = HS_STATE_KEY; |
2496 | mutex_spin_exit(&sc->sc_mtx); |
2497 | return 0; |
2498 | } else if (err == ERESTART) { |
		/*
		 * There weren't enough resources to dispatch the request
		 * to the chip.  Notify the caller so they'll requeue the
		 * request and resubmit it soon.
		 */
2504 | #ifdef HIFN_DEBUG |
2505 | if (hifn_debug) |
			printf("%s: requeue request\n", device_xname(sc->sc_dv));
2507 | #endif |
2508 | free(cmd, M_DEVBUF); |
2509 | sc->sc_needwakeup |= CRYPTO_SYMQ; |
2510 | mutex_spin_exit(&sc->sc_mtx); |
2511 | return (err); |
2512 | } |
2513 | |
2514 | errout: |
2515 | if (cmd != NULL) |
2516 | free(cmd, M_DEVBUF); |
2517 | if (err == EINVAL) |
2518 | hifnstats.hst_invalid++; |
2519 | else |
2520 | hifnstats.hst_nomem++; |
2521 | crp->crp_etype = err; |
2522 | mutex_spin_exit(&sc->sc_mtx); |
2523 | crypto_done(crp); |
2524 | return (0); |
2525 | } |
2526 | |
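/*
 * Recover from a DMA abort.  Results that did complete are delivered
 * normally; the rest are failed with ENOMEM.  Sessions whose keys were
 * already loaded are marked so the keys are uploaded again, and the
 * board is then reset and reinitialized.
 */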
2527 | static void |
2528 | hifn_abort(struct hifn_softc *sc) |
2529 | { |
2530 | struct hifn_dma *dma = sc->sc_dma; |
2531 | struct hifn_command *cmd; |
2532 | struct cryptop *crp; |
2533 | int i, u; |
2534 | |
2535 | i = dma->resk; u = dma->resu; |
2536 | while (u != 0) { |
2537 | cmd = dma->hifn_commands[i]; |
2538 | KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/); |
2539 | dma->hifn_commands[i] = NULL; |
2540 | crp = cmd->crp; |
2541 | |
2542 | if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { |
2543 | /* Salvage what we can. */ |
2544 | hifnstats.hst_opackets++; |
2545 | hifn_callback(sc, cmd, dma->result_bufs[i]); |
2546 | } else { |
2547 | if (cmd->src_map == cmd->dst_map) { |
2548 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2549 | 0, cmd->src_map->dm_mapsize, |
2550 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
2551 | } else { |
2552 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2553 | 0, cmd->src_map->dm_mapsize, |
2554 | BUS_DMASYNC_POSTWRITE); |
2555 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
2556 | 0, cmd->dst_map->dm_mapsize, |
2557 | BUS_DMASYNC_POSTREAD); |
2558 | } |
2559 | |
2560 | if (cmd->srcu.src_m != cmd->dstu.dst_m) { |
2561 | m_freem(cmd->srcu.src_m); |
2562 | crp->crp_buf = (void *)cmd->dstu.dst_m; |
2563 | } |
2564 | |
2565 | /* non-shared buffers cannot be restarted */ |
2566 | if (cmd->src_map != cmd->dst_map) { |
2567 | /* |
2568 | * XXX should be EAGAIN, delayed until |
2569 | * after the reset. |
2570 | */ |
2571 | crp->crp_etype = ENOMEM; |
2572 | bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); |
2573 | bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); |
2574 | } else |
2575 | crp->crp_etype = ENOMEM; |
2576 | |
2577 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
2578 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
2579 | |
2580 | free(cmd, M_DEVBUF); |
2581 | if (crp->crp_etype != EAGAIN) |
2582 | crypto_done(crp); |
2583 | } |
2584 | |
2585 | if (++i == HIFN_D_RES_RSIZE) |
2586 | i = 0; |
2587 | u--; |
2588 | } |
2589 | dma->resk = i; dma->resu = u; |
2590 | |
2591 | /* Force upload of key next time */ |
2592 | for (i = 0; i < sc->sc_maxses; i++) |
2593 | if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) |
2594 | sc->sc_sessions[i].hs_state = HS_STATE_USED; |
2595 | |
2596 | hifn_reset_board(sc, 1); |
2597 | hifn_init_dma(sc); |
2598 | hifn_init_pci_registers(sc); |
2599 | } |
2600 | |
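/*
 * Per-request completion.  Sync the DMA maps, trim a newly allocated
 * output mbuf chain to the proper length, copy any slop bytes back into
 * the caller's buffer, retire the used destination descriptors, save
 * the last cipher block as the next IV for CBC encryption, and copy out
 * the MAC result.  Finally the DMA maps are torn down, the command is
 * freed and crypto_done() is called.
 */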
2601 | static void |
2602 | hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf) |
2603 | { |
2604 | struct hifn_dma *dma = sc->sc_dma; |
2605 | struct cryptop *crp = cmd->crp; |
2606 | struct cryptodesc *crd; |
2607 | struct mbuf *m; |
2608 | int totlen, i, u, ivlen; |
2609 | |
2610 | if (cmd->src_map == cmd->dst_map) |
2611 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2612 | 0, cmd->src_map->dm_mapsize, |
2613 | BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); |
2614 | else { |
2615 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2616 | 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2617 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
2618 | 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2619 | } |
2620 | |
2621 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
2622 | if (cmd->srcu.src_m != cmd->dstu.dst_m) { |
2623 | crp->crp_buf = (void *)cmd->dstu.dst_m; |
2624 | totlen = cmd->src_map->dm_mapsize; |
2625 | for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) { |
2626 | if (totlen < m->m_len) { |
2627 | m->m_len = totlen; |
2628 | totlen = 0; |
2629 | } else |
2630 | totlen -= m->m_len; |
2631 | } |
2632 | cmd->dstu.dst_m->m_pkthdr.len = |
2633 | cmd->srcu.src_m->m_pkthdr.len; |
2634 | m_freem(cmd->srcu.src_m); |
2635 | } |
2636 | } |
2637 | |
2638 | if (cmd->sloplen != 0) { |
2639 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
2640 | m_copyback((struct mbuf *)crp->crp_buf, |
2641 | cmd->src_map->dm_mapsize - cmd->sloplen, |
2642 | cmd->sloplen, (void *)&dma->slop[cmd->slopidx]); |
2643 | else if (crp->crp_flags & CRYPTO_F_IOV) |
2644 | cuio_copyback((struct uio *)crp->crp_buf, |
2645 | cmd->src_map->dm_mapsize - cmd->sloplen, |
2646 | cmd->sloplen, (void *)&dma->slop[cmd->slopidx]); |
2647 | } |
2648 | |
2649 | i = dma->dstk; u = dma->dstu; |
2650 | while (u != 0) { |
2651 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
2652 | offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), |
2653 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2654 | if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { |
2655 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
2656 | offsetof(struct hifn_dma, dstr[i]), |
2657 | sizeof(struct hifn_desc), |
2658 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2659 | break; |
2660 | } |
2661 | if (++i == (HIFN_D_DST_RSIZE + 1)) |
2662 | i = 0; |
2663 | else |
2664 | u--; |
2665 | } |
2666 | dma->dstk = i; dma->dstu = u; |
2667 | |
2668 | hifnstats.hst_obytes += cmd->dst_map->dm_mapsize; |
2669 | |
2670 | if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == |
2671 | HIFN_BASE_CMD_CRYPT) { |
2672 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { |
2673 | if (crd->crd_alg != CRYPTO_DES_CBC && |
2674 | crd->crd_alg != CRYPTO_3DES_CBC && |
2675 | crd->crd_alg != CRYPTO_AES_CBC) |
2676 | continue; |
2677 | ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? |
2678 | HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); |
2679 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
2680 | m_copydata((struct mbuf *)crp->crp_buf, |
2681 | crd->crd_skip + crd->crd_len - ivlen, |
2682 | ivlen, |
2683 | cmd->softc->sc_sessions[cmd->session_num].hs_iv); |
2684 | else if (crp->crp_flags & CRYPTO_F_IOV) { |
2685 | cuio_copydata((struct uio *)crp->crp_buf, |
2686 | crd->crd_skip + crd->crd_len - ivlen, |
2687 | ivlen, |
2688 | cmd->softc->sc_sessions[cmd->session_num].hs_iv); |
2689 | } |
2690 | /* XXX We do not handle contig data */ |
2691 | break; |
2692 | } |
2693 | } |
2694 | |
2695 | if (cmd->base_masks & HIFN_BASE_CMD_MAC) { |
2696 | u_int8_t *macbuf; |
2697 | |
2698 | macbuf = resbuf + sizeof(struct hifn_base_result); |
2699 | if (cmd->base_masks & HIFN_BASE_CMD_COMP) |
2700 | macbuf += sizeof(struct hifn_comp_result); |
2701 | macbuf += sizeof(struct hifn_mac_result); |
2702 | |
2703 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { |
2704 | int len; |
2705 | |
2706 | if (crd->crd_alg == CRYPTO_MD5) |
2707 | len = 16; |
2708 | else if (crd->crd_alg == CRYPTO_SHA1) |
2709 | len = 20; |
2710 | else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 || |
2711 | crd->crd_alg == CRYPTO_SHA1_HMAC_96) |
2712 | len = 12; |
2713 | else |
2714 | continue; |
2715 | |
2716 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
2717 | m_copyback((struct mbuf *)crp->crp_buf, |
2718 | crd->crd_inject, len, macbuf); |
2719 | else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) |
2720 | memcpy(crp->crp_mac, (void *)macbuf, len); |
2721 | break; |
2722 | } |
2723 | } |
2724 | |
2725 | if (cmd->src_map != cmd->dst_map) { |
2726 | bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); |
2727 | bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); |
2728 | } |
2729 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
2730 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
2731 | free(cmd, M_DEVBUF); |
2732 | crypto_done(crp); |
2733 | } |
2734 | |
2735 | #ifdef HAVE_CRYPTO_LZS |
2736 | |
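/*
 * Set up an LZS (de)compression request.  Only mbuf requests are
 * supported; a destination mbuf chain is allocated with room for the
 * expected output (four times the input for decompression, capped at
 * HIFN_MAX_DMALEN) and the command is queued via hifn_compress_enter().
 */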
2737 | static int |
2738 | hifn_compression(struct hifn_softc *sc, struct cryptop *crp, |
2739 | struct hifn_command *cmd) |
2740 | { |
2741 | struct cryptodesc *crd = crp->crp_desc; |
	int err = 0;
2743 | |
2744 | cmd->compcrd = crd; |
2745 | cmd->base_masks |= HIFN_BASE_CMD_COMP; |
2746 | |
2747 | if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { |
2748 | /* |
2749 | * XXX can only handle mbufs right now since we can |
2750 | * XXX dynamically resize them. |
2751 | */ |
		err = EINVAL;
		goto fail;
2754 | } |
2755 | |
2756 | if ((crd->crd_flags & CRD_F_COMP) == 0) |
2757 | cmd->base_masks |= HIFN_BASE_CMD_DECODE; |
2758 | if (crd->crd_alg == CRYPTO_LZS_COMP) |
2759 | cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | |
2760 | HIFN_COMP_CMD_CLEARHIST; |
2761 | |
2762 | if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, |
2763 | HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) { |
2764 | err = ENOMEM; |
2765 | goto fail; |
2766 | } |
2767 | |
2768 | if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, |
2769 | HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { |
2770 | err = ENOMEM; |
2771 | goto fail; |
2772 | } |
2773 | |
2774 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
2775 | int len; |
2776 | |
2777 | if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, |
2778 | cmd->srcu.src_m, BUS_DMA_NOWAIT)) { |
2779 | err = ENOMEM; |
2780 | goto fail; |
2781 | } |
2782 | |
2783 | len = cmd->src_map->dm_mapsize / MCLBYTES; |
2784 | if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) |
2785 | len++; |
2786 | len *= MCLBYTES; |
2787 | |
2788 | if ((crd->crd_flags & CRD_F_COMP) == 0) |
2789 | len *= 4; |
2790 | |
2791 | if (len > HIFN_MAX_DMALEN) |
2792 | len = HIFN_MAX_DMALEN; |
2793 | |
2794 | cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); |
2795 | if (cmd->dstu.dst_m == NULL) { |
2796 | err = ENOMEM; |
2797 | goto fail; |
2798 | } |
2799 | |
2800 | if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, |
2801 | cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { |
2802 | err = ENOMEM; |
2803 | goto fail; |
2804 | } |
2805 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
2806 | if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, |
2807 | cmd->srcu.src_io, BUS_DMA_NOWAIT)) { |
2808 | err = ENOMEM; |
2809 | goto fail; |
2810 | } |
2811 | if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, |
2812 | cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { |
2813 | err = ENOMEM; |
2814 | goto fail; |
2815 | } |
2816 | } |
2817 | |
2818 | if (cmd->src_map == cmd->dst_map) |
2819 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2820 | 0, cmd->src_map->dm_mapsize, |
2821 | BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); |
2822 | else { |
2823 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2824 | 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2825 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
2826 | 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
2827 | } |
2828 | |
2829 | cmd->crp = crp; |
2830 | /* |
2831 | * Always use session 0. The modes of compression we use are |
2832 | * stateless and there is always at least one compression |
2833 | * context, zero. |
2834 | */ |
2835 | cmd->session_num = 0; |
2836 | cmd->softc = sc; |
2837 | |
2838 | err = hifn_compress_enter(sc, cmd); |
2839 | |
2840 | if (err != 0) |
2841 | goto fail; |
2842 | return (0); |
2843 | |
2844 | fail: |
2845 | if (cmd->dst_map != NULL) { |
2846 | if (cmd->dst_map->dm_nsegs > 0) |
2847 | bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); |
2848 | bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); |
2849 | } |
2850 | if (cmd->src_map != NULL) { |
2851 | if (cmd->src_map->dm_nsegs > 0) |
2852 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
2853 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
2854 | } |
2855 | free(cmd, M_DEVBUF); |
2856 | if (err == EINVAL) |
2857 | hifnstats.hst_invalid++; |
2858 | else |
2859 | hifnstats.hst_nomem++; |
2860 | crp->crp_etype = err; |
2861 | crypto_done(crp); |
2862 | return (0); |
2863 | } |
2864 | |
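/*
 * Queue a prepared compression command on the DMA rings, mirroring the
 * crypto path in hifn_crypto(): reserve ring space, write the command
 * descriptor, load the source, result and destination descriptors and
 * enable any idle engines.  The completion callback is set to
 * hifn_callback_comp().
 */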
2865 | static int |
2866 | hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd) |
2867 | { |
2868 | struct hifn_dma *dma = sc->sc_dma; |
2869 | int cmdi, resi; |
2870 | u_int32_t cmdlen; |
2871 | |
2872 | if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || |
2873 | (dma->resu + 1) > HIFN_D_CMD_RSIZE) |
2874 | return (ENOMEM); |
2875 | |
2876 | if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || |
2877 | (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE) |
2878 | return (ENOMEM); |
2879 | |
2880 | if (dma->cmdi == HIFN_D_CMD_RSIZE) { |
2881 | dma->cmdi = 0; |
2882 | dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | |
2883 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
2884 | HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, |
2885 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
2886 | } |
2887 | cmdi = dma->cmdi++; |
2888 | cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); |
2889 | HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); |
2890 | |
2891 | /* .p for command/result already set */ |
2892 | dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | |
2893 | HIFN_D_MASKDONEIRQ); |
2894 | HIFN_CMDR_SYNC(sc, cmdi, |
2895 | BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); |
2896 | dma->cmdu++; |
2897 | if (sc->sc_c_busy == 0) { |
2898 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); |
2899 | sc->sc_c_busy = 1; |
2900 | SET_LED(sc, HIFN_MIPSRST_LED0); |
2901 | } |
2902 | |
2903 | /* |
2904 | * We don't worry about missing an interrupt (which a "command wait" |
2905 | * interrupt salvages us from), unless there is more than one command |
2906 | * in the queue. |
2907 | */ |
2908 | if (dma->cmdu > 1) { |
2909 | sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; |
2910 | WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); |
2911 | } |
2912 | |
2913 | hifnstats.hst_ipackets++; |
2914 | hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; |
2915 | |
2916 | hifn_dmamap_load_src(sc, cmd); |
2917 | if (sc->sc_s_busy == 0) { |
2918 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); |
2919 | sc->sc_s_busy = 1; |
2920 | SET_LED(sc, HIFN_MIPSRST_LED1); |
2921 | } |
2922 | |
2923 | /* |
2924 | * Unlike other descriptors, we don't mask done interrupt from |
2925 | * result descriptor. |
2926 | */ |
2927 | if (dma->resi == HIFN_D_RES_RSIZE) { |
2928 | dma->resi = 0; |
2929 | dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | |
2930 | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); |
2931 | HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, |
2932 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2933 | } |
2934 | resi = dma->resi++; |
2935 | dma->hifn_commands[resi] = cmd; |
2936 | HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); |
2937 | dma->resr[resi].l = htole32(HIFN_MAX_RESULT | |
2938 | HIFN_D_VALID | HIFN_D_LAST); |
2939 | HIFN_RESR_SYNC(sc, resi, |
2940 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2941 | dma->resu++; |
2942 | if (sc->sc_r_busy == 0) { |
2943 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); |
2944 | sc->sc_r_busy = 1; |
2945 | SET_LED(sc, HIFN_MIPSRST_LED2); |
2946 | } |
2947 | |
2948 | if (cmd->sloplen) |
2949 | cmd->slopidx = resi; |
2950 | |
2951 | hifn_dmamap_load_dst(sc, cmd); |
2952 | |
2953 | if (sc->sc_d_busy == 0) { |
2954 | WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); |
2955 | sc->sc_d_busy = 1; |
2956 | } |
2957 | sc->sc_active = 5; |
2958 | cmd->cmd_callback = hifn_callback_comp; |
2959 | return (0); |
2960 | } |
2961 | |
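/*
 * Completion path for compression requests.  On a destination overrun
 * the output mbuf chain is grown by another MCLBYTES (up to
 * HIFN_MAX_DMALEN) and the command is resubmitted; otherwise the output
 * length is computed from the base result structure, the output chain
 * is trimmed and handed back in crp_buf, and crypto_done() is called.
 */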
2962 | static void |
2963 | hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd, |
2964 | u_int8_t *resbuf) |
2965 | { |
2966 | struct hifn_base_result baseres; |
2967 | struct cryptop *crp = cmd->crp; |
2968 | struct hifn_dma *dma = sc->sc_dma; |
2969 | struct mbuf *m; |
2970 | int err = 0, i, u; |
2971 | u_int32_t olen; |
2972 | bus_size_t dstsize; |
2973 | |
2974 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
2975 | 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2976 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
2977 | 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2978 | |
2979 | dstsize = cmd->dst_map->dm_mapsize; |
2980 | bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); |
2981 | |
2982 | memcpy(&baseres, resbuf, sizeof(struct hifn_base_result)); |
2983 | |
2984 | i = dma->dstk; u = dma->dstu; |
2985 | while (u != 0) { |
2986 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
2987 | offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), |
2988 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2989 | if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { |
2990 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, |
2991 | offsetof(struct hifn_dma, dstr[i]), |
2992 | sizeof(struct hifn_desc), |
2993 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2994 | break; |
2995 | } |
2996 | if (++i == (HIFN_D_DST_RSIZE + 1)) |
2997 | i = 0; |
2998 | else |
2999 | u--; |
3000 | } |
3001 | dma->dstk = i; dma->dstu = u; |
3002 | |
3003 | if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) { |
3004 | bus_size_t xlen; |
3005 | |
3006 | xlen = dstsize; |
3007 | |
		m_freem(cmd->dstu.dst_m);
		cmd->dstu.dst_m = NULL;
3009 | |
3010 | if (xlen == HIFN_MAX_DMALEN) { |
3011 | /* We've done all we can. */ |
3012 | err = E2BIG; |
3013 | goto out; |
3014 | } |
3015 | |
3016 | xlen += MCLBYTES; |
3017 | |
3018 | if (xlen > HIFN_MAX_DMALEN) |
3019 | xlen = HIFN_MAX_DMALEN; |
3020 | |
3021 | cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen, |
3022 | cmd->srcu.src_m); |
3023 | if (cmd->dstu.dst_m == NULL) { |
3024 | err = ENOMEM; |
3025 | goto out; |
3026 | } |
3027 | if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, |
3028 | cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { |
3029 | err = ENOMEM; |
3030 | goto out; |
3031 | } |
3032 | |
3033 | bus_dmamap_sync(sc->sc_dmat, cmd->src_map, |
3034 | 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
3035 | bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, |
3036 | 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
3037 | |
3038 | err = hifn_compress_enter(sc, cmd); |
3039 | if (err != 0) |
3040 | goto out; |
3041 | return; |
3042 | } |
3043 | |
3044 | olen = dstsize - (letoh16(baseres.dst_cnt) | |
3045 | (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> |
3046 | HIFN_BASE_RES_DSTLEN_S) << 16)); |
3047 | |
3048 | crp->crp_olen = olen - cmd->compcrd->crd_skip; |
3049 | |
3050 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
3051 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
3052 | bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); |
3053 | |
3054 | m = cmd->dstu.dst_m; |
3055 | if (m->m_flags & M_PKTHDR) |
3056 | m->m_pkthdr.len = olen; |
3057 | crp->crp_buf = (void *)m; |
3058 | for (; m != NULL; m = m->m_next) { |
3059 | if (olen >= m->m_len) |
3060 | olen -= m->m_len; |
3061 | else { |
3062 | m->m_len = olen; |
3063 | olen = 0; |
3064 | } |
3065 | } |
3066 | |
3067 | m_freem(cmd->srcu.src_m); |
3068 | free(cmd, M_DEVBUF); |
3069 | crp->crp_etype = 0; |
3070 | crypto_done(crp); |
3071 | return; |
3072 | |
3073 | out: |
	if (cmd->dst_map != NULL) {
		if (cmd->dst_map->dm_nsegs != 0)
			bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
3079 | if (cmd->src_map != NULL) { |
3080 | if (cmd->src_map->dm_nsegs != 0) |
3081 | bus_dmamap_unload(sc->sc_dmat, cmd->src_map); |
3082 | bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); |
3083 | } |
3084 | if (cmd->dstu.dst_m != NULL) |
3085 | m_freem(cmd->dstu.dst_m); |
3086 | free(cmd, M_DEVBUF); |
3087 | crp->crp_etype = err; |
3088 | crypto_done(crp); |
3089 | } |
3090 | |
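/*
 * Allocate a chain of cluster-backed mbufs holding at least totlen
 * bytes, duplicating the packet header from mtemplate when present.
 * Returns NULL if any allocation fails.
 */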
3091 | static struct mbuf * |
3092 | hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) |
3093 | { |
3094 | int len; |
3095 | struct mbuf *m, *m0, *mlast; |
3096 | |
3097 | if (mtemplate->m_flags & M_PKTHDR) { |
3098 | len = MHLEN; |
3099 | MGETHDR(m0, M_DONTWAIT, MT_DATA); |
3100 | } else { |
3101 | len = MLEN; |
3102 | MGET(m0, M_DONTWAIT, MT_DATA); |
3103 | } |
3104 | if (m0 == NULL) |
3105 | return (NULL); |
3106 | if (len == MHLEN) |
3107 | M_DUP_PKTHDR(m0, mtemplate); |
3108 | MCLGET(m0, M_DONTWAIT); |
3109 | if (!(m0->m_flags & M_EXT)) { |
3110 | m_freem(m0); |
3111 | return (NULL); |
3112 | } |
3113 | len = MCLBYTES; |
3114 | |
3115 | totlen -= len; |
3116 | m0->m_pkthdr.len = m0->m_len = len; |
3117 | mlast = m0; |
3118 | |
3119 | while (totlen > 0) { |
3120 | MGET(m, M_DONTWAIT, MT_DATA); |
3121 | if (m == NULL) { |
3122 | m_freem(m0); |
3123 | return (NULL); |
3124 | } |
3125 | MCLGET(m, M_DONTWAIT); |
3126 | if (!(m->m_flags & M_EXT)) { |
3127 | m_freem(m); |
3128 | m_freem(m0); |
3129 | return (NULL); |
3130 | } |
3131 | len = MCLBYTES; |
3132 | m->m_len = len; |
3133 | if (m0->m_flags & M_PKTHDR) |
3134 | m0->m_pkthdr.len += len; |
3135 | totlen -= len; |
3136 | |
3137 | mlast->m_next = m; |
3138 | mlast = m; |
3139 | } |
3140 | |
3141 | return (m0); |
3142 | } |
3143 | #endif /* HAVE_CRYPTO_LZS */ |
3144 | |
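/*
 * Register write helper.  On parts that cannot tolerate burst writes, a
 * dummy read is inserted between back-to-back writes to consecutive
 * registers in the same group (see below).
 */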
3145 | static void |
3146 | hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val) |
3147 | { |
3148 | /* |
	 * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
3150 | * and Group 1 registers; avoid conditions that could create |
3151 | * burst writes by doing a read in between the writes. |
3152 | */ |
3153 | if (sc->sc_flags & HIFN_NO_BURSTWRITE) { |
3154 | if (sc->sc_waw_lastgroup == reggrp && |
3155 | sc->sc_waw_lastreg == reg - 4) { |
3156 | bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); |
3157 | } |
3158 | sc->sc_waw_lastgroup = reggrp; |
3159 | sc->sc_waw_lastreg = reg; |
3160 | } |
3161 | if (reggrp == 0) |
3162 | bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); |
3163 | else |
3164 | bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); |
}
3167 | |
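/*
 * Register read helper.  The read itself breaks up any potential write
 * burst, so the write-after-write tracking state is reset.
 */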
3168 | static u_int32_t |
3169 | hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg) |
3170 | { |
3171 | if (sc->sc_flags & HIFN_NO_BURSTWRITE) { |
3172 | sc->sc_waw_lastgroup = -1; |
3173 | sc->sc_waw_lastreg = 1; |
3174 | } |
3175 | if (reggrp == 0) |
3176 | return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg)); |
3177 | return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg)); |
3178 | } |
3179 | |