1/* $NetBSD: sdhc.c,v 1.95 2016/08/10 04:24:17 nonaka Exp $ */
2/* $OpenBSD: sdhc.c,v 1.25 2009/01/13 19:44:20 grange Exp $ */
3
4/*
5 * Copyright (c) 2006 Uwe Stuehler <uwe@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*
21 * SD Host Controller driver based on the SD Host Controller Standard
22 * Simplified Specification Version 1.00 (www.sdcard.com).
23 */
24
25#include <sys/cdefs.h>
26__KERNEL_RCSID(0, "$NetBSD: sdhc.c,v 1.95 2016/08/10 04:24:17 nonaka Exp $");
27
28#ifdef _KERNEL_OPT
29#include "opt_sdmmc.h"
30#endif
31
32#include <sys/param.h>
33#include <sys/device.h>
34#include <sys/kernel.h>
35#include <sys/malloc.h>
36#include <sys/systm.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/atomic.h>
40
41#include <dev/sdmmc/sdhcreg.h>
42#include <dev/sdmmc/sdhcvar.h>
43#include <dev/sdmmc/sdmmcchip.h>
44#include <dev/sdmmc/sdmmcreg.h>
45#include <dev/sdmmc/sdmmcvar.h>
46
47#ifdef SDHC_DEBUG
48int sdhcdebug = 1;
49#define DPRINTF(n,s) do { if ((n) <= sdhcdebug) printf s; } while (0)
50void sdhc_dump_regs(struct sdhc_host *);
51#else
52#define DPRINTF(n,s) do {} while (0)
53#endif
54
55#define SDHC_COMMAND_TIMEOUT hz
56#define SDHC_BUFFER_TIMEOUT hz
57#define SDHC_TRANSFER_TIMEOUT hz
58#define SDHC_DMA_TIMEOUT (hz*3)
59#define SDHC_TUNING_TIMEOUT hz
60
61struct sdhc_host {
62 struct sdhc_softc *sc; /* host controller device */
63
64 bus_space_tag_t iot; /* host register set tag */
65 bus_space_handle_t ioh; /* host register set handle */
66 bus_size_t ios; /* host register space size */
67 bus_dma_tag_t dmat; /* host DMA tag */
68
69 device_t sdmmc; /* generic SD/MMC device */
70
71 u_int clkbase; /* base clock frequency in KHz */
72 int maxblklen; /* maximum block length */
73 uint32_t ocr; /* OCR value from capabilities */
74
75 uint8_t regs[14]; /* host controller state */
76
77 uint16_t intr_status; /* soft interrupt status */
78 uint16_t intr_error_status; /* soft error status */
79 kmutex_t intr_lock;
80 kcondvar_t intr_cv;
81
82 callout_t tuning_timer;
83 int tuning_timing;
84 u_int tuning_timer_count;
85 u_int tuning_timer_pending;
86
87 int specver; /* spec. version */
88
89 uint32_t flags; /* flags for this host */
90#define SHF_USE_DMA 0x0001
91#define SHF_USE_4BIT_MODE 0x0002
92#define SHF_USE_8BIT_MODE 0x0004
93#define SHF_MODE_DMAEN 0x0008 /* needs SDHC_DMA_ENABLE in mode */
94#define SHF_USE_ADMA2_32 0x0010
95#define SHF_USE_ADMA2_64 0x0020
96#define SHF_USE_ADMA2_MASK 0x0030
97
98 bus_dmamap_t adma_map;
99 bus_dma_segment_t adma_segs[1];
100 void *adma2;
101};
102
103#define HDEVNAME(hp) (device_xname((hp)->sc->sc_dev))
104
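/*
 * Register access helpers.  Hosts flagged SDHC_FLAG_32BIT_ACCESS can only
 * be accessed in 32-bit quantities, so the 1- and 2-byte accessors below
 * emulate narrower reads and writes with aligned 32-bit operations plus
 * shifting and masking.
 */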
105static uint8_t
106hread1(struct sdhc_host *hp, bus_size_t reg)
107{
108
109 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
110 return bus_space_read_1(hp->iot, hp->ioh, reg);
111 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 3));
112}
113
114static uint16_t
115hread2(struct sdhc_host *hp, bus_size_t reg)
116{
117
118 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS))
119 return bus_space_read_2(hp->iot, hp->ioh, reg);
120 return bus_space_read_4(hp->iot, hp->ioh, reg & -4) >> (8 * (reg & 2));
121}
122
123#define HREAD1(hp, reg) hread1(hp, reg)
124#define HREAD2(hp, reg) hread2(hp, reg)
125#define HREAD4(hp, reg) \
126 (bus_space_read_4((hp)->iot, (hp)->ioh, (reg)))
127
128
129static void
130hwrite1(struct sdhc_host *hp, bus_size_t o, uint8_t val)
131{
132
133 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
134 bus_space_write_1(hp->iot, hp->ioh, o, val);
135 } else {
136 const size_t shift = 8 * (o & 3);
137 o &= -4;
138 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
139 tmp = (val << shift) | (tmp & ~(0xff << shift));
140 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
141 }
142}
143
144static void
145hwrite2(struct sdhc_host *hp, bus_size_t o, uint16_t val)
146{
147
148 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
149 bus_space_write_2(hp->iot, hp->ioh, o, val);
150 } else {
151 const size_t shift = 8 * (o & 2);
152 o &= -4;
153 uint32_t tmp = bus_space_read_4(hp->iot, hp->ioh, o);
154 tmp = (val << shift) | (tmp & ~(0xffff << shift));
155 bus_space_write_4(hp->iot, hp->ioh, o, tmp);
156 }
157}
158
159#define HWRITE1(hp, reg, val) hwrite1(hp, reg, val)
160#define HWRITE2(hp, reg, val) hwrite2(hp, reg, val)
161#define HWRITE4(hp, reg, val) \
162 bus_space_write_4((hp)->iot, (hp)->ioh, (reg), (val))
163
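/*
 * Read-modify-write helpers for setting and clearing register bits.  The
 * "if (bits)" guard lets the whole access be optimized away when the bit
 * mask is a compile-time constant zero.
 */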
164#define HCLR1(hp, reg, bits) \
165 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) & ~(bits)); while (0)
166#define HCLR2(hp, reg, bits) \
167 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) & ~(bits)); while (0)
168#define HCLR4(hp, reg, bits) \
169 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) & ~(bits)); while (0)
170#define HSET1(hp, reg, bits) \
171 do if (bits) HWRITE1((hp), (reg), HREAD1((hp), (reg)) | (bits)); while (0)
172#define HSET2(hp, reg, bits) \
173 do if (bits) HWRITE2((hp), (reg), HREAD2((hp), (reg)) | (bits)); while (0)
174#define HSET4(hp, reg, bits) \
175 do if (bits) HWRITE4((hp), (reg), HREAD4((hp), (reg)) | (bits)); while (0)
176
177static int sdhc_host_reset(sdmmc_chipset_handle_t);
178static int sdhc_host_reset1(sdmmc_chipset_handle_t);
179static uint32_t sdhc_host_ocr(sdmmc_chipset_handle_t);
180static int sdhc_host_maxblklen(sdmmc_chipset_handle_t);
181static int sdhc_card_detect(sdmmc_chipset_handle_t);
182static int sdhc_write_protect(sdmmc_chipset_handle_t);
183static int sdhc_bus_power(sdmmc_chipset_handle_t, uint32_t);
184static int sdhc_bus_clock_ddr(sdmmc_chipset_handle_t, int, bool);
185static int sdhc_bus_width(sdmmc_chipset_handle_t, int);
186static int sdhc_bus_rod(sdmmc_chipset_handle_t, int);
187static void sdhc_card_enable_intr(sdmmc_chipset_handle_t, int);
188static void sdhc_card_intr_ack(sdmmc_chipset_handle_t);
189static void sdhc_exec_command(sdmmc_chipset_handle_t,
190 struct sdmmc_command *);
191static int sdhc_signal_voltage(sdmmc_chipset_handle_t, int);
192static int sdhc_execute_tuning1(struct sdhc_host *, int);
193static int sdhc_execute_tuning(sdmmc_chipset_handle_t, int);
194static void sdhc_tuning_timer(void *);
195static int sdhc_start_command(struct sdhc_host *, struct sdmmc_command *);
196static int sdhc_wait_state(struct sdhc_host *, uint32_t, uint32_t);
197static int sdhc_soft_reset(struct sdhc_host *, int);
198static int sdhc_wait_intr(struct sdhc_host *, int, int, bool);
199static void sdhc_transfer_data(struct sdhc_host *, struct sdmmc_command *);
200static int sdhc_transfer_data_dma(struct sdhc_host *, struct sdmmc_command *);
201static int sdhc_transfer_data_pio(struct sdhc_host *, struct sdmmc_command *);
202static void sdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
203static void sdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
204static void esdhc_read_data_pio(struct sdhc_host *, uint8_t *, u_int);
205static void esdhc_write_data_pio(struct sdhc_host *, uint8_t *, u_int);
206
207static struct sdmmc_chip_functions sdhc_functions = {
208 /* host controller reset */
209 .host_reset = sdhc_host_reset,
210
211 /* host controller capabilities */
212 .host_ocr = sdhc_host_ocr,
213 .host_maxblklen = sdhc_host_maxblklen,
214
215 /* card detection */
216 .card_detect = sdhc_card_detect,
217
218 /* write protect */
219 .write_protect = sdhc_write_protect,
220
221	/* bus power, clock frequency, width and ROD (OpenDrain/PushPull) */
222 .bus_power = sdhc_bus_power,
223 .bus_clock = NULL, /* see sdhc_bus_clock_ddr */
224 .bus_width = sdhc_bus_width,
225 .bus_rod = sdhc_bus_rod,
226
227 /* command execution */
228 .exec_command = sdhc_exec_command,
229
230 /* card interrupt */
231 .card_enable_intr = sdhc_card_enable_intr,
232 .card_intr_ack = sdhc_card_intr_ack,
233
234 /* UHS functions */
235 .signal_voltage = sdhc_signal_voltage,
236 .bus_clock_ddr = sdhc_bus_clock_ddr,
237 .execute_tuning = sdhc_execute_tuning,
238};
239
240static int
241sdhc_cfprint(void *aux, const char *pnp)
242{
243 const struct sdmmcbus_attach_args * const saa = aux;
244 const struct sdhc_host * const hp = saa->saa_sch;
245
246 if (pnp) {
247 aprint_normal("sdmmc at %s", pnp);
248 }
249 for (size_t host = 0; host < hp->sc->sc_nhosts; host++) {
250 if (hp->sc->sc_host[host] == hp) {
251 aprint_normal(" slot %zu", host);
252 }
253 }
254
255 return UNCONF;
256}
257
258/*
259 * Called by attachment driver. For each SD card slot there is one SD
260 * host controller standard register set. (1.3)
261 */
262int
263sdhc_host_found(struct sdhc_softc *sc, bus_space_tag_t iot,
264 bus_space_handle_t ioh, bus_size_t iosize)
265{
266 struct sdmmcbus_attach_args saa;
267 struct sdhc_host *hp;
268 uint32_t caps, caps2;
269 uint16_t sdhcver;
270 int error;
271
272 /* Allocate one more host structure. */
273 hp = malloc(sizeof(struct sdhc_host), M_DEVBUF, M_WAITOK|M_ZERO);
274 if (hp == NULL) {
275 aprint_error_dev(sc->sc_dev,
276 "couldn't alloc memory (sdhc host)\n");
277 goto err1;
278 }
279 sc->sc_host[sc->sc_nhosts++] = hp;
280
281 /* Fill in the new host structure. */
282 hp->sc = sc;
283 hp->iot = iot;
284 hp->ioh = ioh;
285 hp->ios = iosize;
286 hp->dmat = sc->sc_dmat;
287
288 mutex_init(&hp->intr_lock, MUTEX_DEFAULT, IPL_SDMMC);
289 cv_init(&hp->intr_cv, "sdhcintr");
290 callout_init(&hp->tuning_timer, CALLOUT_MPSAFE);
291 callout_setfunc(&hp->tuning_timer, sdhc_tuning_timer, hp);
292
293 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
294 sdhcver = SDHC_SPEC_VERS_300 << SDHC_SPEC_VERS_SHIFT;
295 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
296 sdhcver = HREAD4(hp, SDHC_ESDHC_HOST_CTL_VERSION);
297 } else {
298 sdhcver = HREAD2(hp, SDHC_HOST_CTL_VERSION);
299 }
300 aprint_normal_dev(sc->sc_dev, "SDHC ");
301 hp->specver = SDHC_SPEC_VERSION(sdhcver);
302 switch (SDHC_SPEC_VERSION(sdhcver)) {
303 case SDHC_SPEC_VERS_100:
304 aprint_normal("1.0");
305 break;
306
307 case SDHC_SPEC_VERS_200:
308 aprint_normal("2.0");
309 break;
310
311 case SDHC_SPEC_VERS_300:
312 aprint_normal("3.0");
313 break;
314
315 case SDHC_SPEC_VERS_400:
316 aprint_normal("4.0");
317 break;
318
319 default:
320 aprint_normal("unknown version(0x%x)",
321 SDHC_SPEC_VERSION(sdhcver));
322 break;
323 }
324 aprint_normal(", rev %u", SDHC_VENDOR_VERSION(sdhcver));
325
326 /*
327 * Reset the host controller and enable interrupts.
328 */
329 (void)sdhc_host_reset(hp);
330
331 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
332 /* init uSDHC registers */
333 HWRITE4(hp, SDHC_MMC_BOOT, 0);
334 HWRITE4(hp, SDHC_HOST_CTL, SDHC_USDHC_BURST_LEN_EN |
335 SDHC_USDHC_HOST_CTL_RESV23 | SDHC_USDHC_EMODE_LE);
336 HWRITE4(hp, SDHC_WATERMARK_LEVEL,
337 (0x10 << SDHC_WATERMARK_WR_BRST_SHIFT) |
338 (0x40 << SDHC_WATERMARK_WRITE_SHIFT) |
339 (0x10 << SDHC_WATERMARK_RD_BRST_SHIFT) |
340 (0x40 << SDHC_WATERMARK_READ_SHIFT));
341 HSET4(hp, SDHC_VEND_SPEC,
342 SDHC_VEND_SPEC_MBO |
343 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
344 SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN |
345 SDHC_VEND_SPEC_HCLK_SOFT_EN |
346 SDHC_VEND_SPEC_IPG_CLK_SOFT_EN |
347 SDHC_VEND_SPEC_AC12_WR_CHKBUSY_EN |
348 SDHC_VEND_SPEC_FRC_SDCLK_ON);
349 }
350
351 /* Determine host capabilities. */
352 if (ISSET(sc->sc_flags, SDHC_FLAG_HOSTCAPS)) {
353 caps = sc->sc_caps;
354 caps2 = sc->sc_caps2;
355 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
356		/* the uSDHC capability register is a little different */
357 caps = HREAD4(hp, SDHC_CAPABILITIES);
358 caps |= SDHC_8BIT_SUPP;
359 if (caps & SDHC_ADMA1_SUPP)
360 caps |= SDHC_ADMA2_SUPP;
361 sc->sc_caps = caps;
362 /* uSDHC has no SDHC_CAPABILITIES2 register */
363 caps2 = sc->sc_caps2 = SDHC_SDR50_SUPP | SDHC_DDR50_SUPP;
364 } else {
365 caps = sc->sc_caps = HREAD4(hp, SDHC_CAPABILITIES);
366 if (hp->specver >= SDHC_SPEC_VERS_300) {
367 caps2 = sc->sc_caps2 = HREAD4(hp, SDHC_CAPABILITIES2);
368 } else {
369 caps2 = sc->sc_caps2 = 0;
370 }
371 }
372
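	/*
	 * Decode the mode-1 re-tuning timer count: the 4-bit field n is
	 * understood to encode 2^(n-1) seconds, and 0xF means the value
	 * comes from another source, which is treated here as no timer.
	 */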
373 const u_int retuning_mode = (caps2 >> SDHC_RETUNING_MODES_SHIFT) &
374 SDHC_RETUNING_MODES_MASK;
375 if (retuning_mode == SDHC_RETUNING_MODE_1) {
376 hp->tuning_timer_count = (caps2 >> SDHC_TIMER_COUNT_SHIFT) &
377 SDHC_TIMER_COUNT_MASK;
378 if (hp->tuning_timer_count == 0xf)
379 hp->tuning_timer_count = 0;
380 if (hp->tuning_timer_count)
381 hp->tuning_timer_count =
382 1 << (hp->tuning_timer_count - 1);
383 }
384
385 /*
386 * Use DMA if the host system and the controller support it.
387	 * Supports an integrated or external DMA engine, with or without
388 * SDHC_DMA_ENABLE in the command.
389 */
390 if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA) ||
391	    (ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA) &&
392	    ISSET(caps, SDHC_DMA_SUPPORT))) {
393 SET(hp->flags, SHF_USE_DMA);
394
395 if (ISSET(sc->sc_flags, SDHC_FLAG_USE_ADMA2) &&
396 ISSET(caps, SDHC_ADMA2_SUPP)) {
397 SET(hp->flags, SHF_MODE_DMAEN);
398 /*
399 * 64-bit mode was present in the 2.00 spec, removed
400 * from 3.00, and re-added in 4.00 with a different
401 * descriptor layout. We only support 2.00 and 3.00
402 * descriptors for now.
403 */
404 if (hp->specver == SDHC_SPEC_VERS_200 &&
405 ISSET(caps, SDHC_64BIT_SYS_BUS)) {
406 SET(hp->flags, SHF_USE_ADMA2_64);
407 aprint_normal(", 64-bit ADMA2");
408 } else {
409 SET(hp->flags, SHF_USE_ADMA2_32);
410 aprint_normal(", 32-bit ADMA2");
411 }
412 } else {
413 if (!ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA) ||
414 ISSET(sc->sc_flags, SDHC_FLAG_EXTDMA_DMAEN))
415 SET(hp->flags, SHF_MODE_DMAEN);
416 if (sc->sc_vendor_transfer_data_dma) {
417 aprint_normal(", platform DMA");
418 } else {
419 aprint_normal(", SDMA");
420 }
421 }
422 } else {
423 aprint_normal(", PIO");
424 }
425
426 /*
427 * Determine the base clock frequency. (2.2.24)
428 */
429 if (hp->specver >= SDHC_SPEC_VERS_300) {
430 hp->clkbase = SDHC_BASE_V3_FREQ_KHZ(caps);
431 } else {
432 hp->clkbase = SDHC_BASE_FREQ_KHZ(caps);
433 }
434 if (hp->clkbase == 0 ||
435 ISSET(sc->sc_flags, SDHC_FLAG_NO_CLKBASE)) {
436 if (sc->sc_clkbase == 0) {
437 /* The attachment driver must tell us. */
438 aprint_error_dev(sc->sc_dev,
439 "unknown base clock frequency\n");
440 goto err;
441 }
442 hp->clkbase = sc->sc_clkbase;
443 }
444 if (hp->clkbase < 10000 || hp->clkbase > 10000 * 256) {
445 /* SDHC 1.0 supports only 10-63 MHz. */
446 aprint_error_dev(sc->sc_dev,
447 "base clock frequency out of range: %u MHz\n",
448 hp->clkbase / 1000);
449 goto err;
450 }
451 aprint_normal(", %u kHz", hp->clkbase);
452
453 /*
454 * XXX Set the data timeout counter value according to
455 * capabilities. (2.2.15)
456 */
457 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
458#if 1
459 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
460 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
461#endif
462
463 if (ISSET(caps, SDHC_EMBEDDED_SLOT))
464 aprint_normal(", embedded slot");
465
466 /*
467 * Determine SD bus voltage levels supported by the controller.
468 */
469 aprint_normal(",");
470 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP)) {
471 SET(hp->ocr, MMC_OCR_HCS);
472 aprint_normal(" HS");
473 }
474 if (ISSET(caps2, SDHC_SDR50_SUPP)) {
475 SET(hp->ocr, MMC_OCR_S18A);
476 aprint_normal(" SDR50");
477 }
478 if (ISSET(caps2, SDHC_DDR50_SUPP)) {
479 SET(hp->ocr, MMC_OCR_S18A);
480 aprint_normal(" DDR50");
481 }
482 if (ISSET(caps2, SDHC_SDR104_SUPP)) {
483 SET(hp->ocr, MMC_OCR_S18A);
484 aprint_normal(" SDR104 HS200");
485 }
486 if (ISSET(caps, SDHC_VOLTAGE_SUPP_1_8V)) {
487 SET(hp->ocr, MMC_OCR_1_65V_1_95V);
488 aprint_normal(" 1.8V");
489 }
490 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_0V)) {
491 SET(hp->ocr, MMC_OCR_2_9V_3_0V | MMC_OCR_3_0V_3_1V);
492 aprint_normal(" 3.0V");
493 }
494 if (ISSET(caps, SDHC_VOLTAGE_SUPP_3_3V)) {
495 SET(hp->ocr, MMC_OCR_3_2V_3_3V | MMC_OCR_3_3V_3_4V);
496 aprint_normal(" 3.3V");
497 }
498 if (hp->specver >= SDHC_SPEC_VERS_300) {
499 aprint_normal(", re-tuning mode %d", retuning_mode + 1);
500 if (hp->tuning_timer_count)
501 aprint_normal(" (%us timer)", hp->tuning_timer_count);
502 }
503
504 /*
505 * Determine the maximum block length supported by the host
506 * controller. (2.2.24)
507 */
508 switch((caps >> SDHC_MAX_BLK_LEN_SHIFT) & SDHC_MAX_BLK_LEN_MASK) {
509 case SDHC_MAX_BLK_LEN_512:
510 hp->maxblklen = 512;
511 break;
512
513 case SDHC_MAX_BLK_LEN_1024:
514 hp->maxblklen = 1024;
515 break;
516
517 case SDHC_MAX_BLK_LEN_2048:
518 hp->maxblklen = 2048;
519 break;
520
521 case SDHC_MAX_BLK_LEN_4096:
522 hp->maxblklen = 4096;
523 break;
524
525 default:
526 aprint_error_dev(sc->sc_dev, "max block length unknown\n");
527 goto err;
528 }
529 aprint_normal(", %u byte blocks", hp->maxblklen);
530 aprint_normal("\n");
531
532 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
533 int rseg;
534
535 /* Allocate ADMA2 descriptor memory */
536 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
537 PAGE_SIZE, hp->adma_segs, 1, &rseg, BUS_DMA_WAITOK);
538 if (error) {
539 aprint_error_dev(sc->sc_dev,
540 "ADMA2 dmamem_alloc failed (%d)\n", error);
541 goto adma_done;
542 }
543 error = bus_dmamem_map(sc->sc_dmat, hp->adma_segs, rseg,
544 PAGE_SIZE, (void **)&hp->adma2, BUS_DMA_WAITOK);
545 if (error) {
546 aprint_error_dev(sc->sc_dev,
547 "ADMA2 dmamem_map failed (%d)\n", error);
548 goto adma_done;
549 }
550 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
551 0, BUS_DMA_WAITOK, &hp->adma_map);
552 if (error) {
553 aprint_error_dev(sc->sc_dev,
554 "ADMA2 dmamap_create failed (%d)\n", error);
555 goto adma_done;
556 }
557 error = bus_dmamap_load(sc->sc_dmat, hp->adma_map,
558 hp->adma2, PAGE_SIZE, NULL,
559 BUS_DMA_WAITOK|BUS_DMA_WRITE);
560 if (error) {
561 aprint_error_dev(sc->sc_dev,
562 "ADMA2 dmamap_load failed (%d)\n", error);
563 goto adma_done;
564 }
565
566 memset(hp->adma2, 0, PAGE_SIZE);
567
568adma_done:
569 if (error)
570 CLR(hp->flags, SHF_USE_ADMA2_MASK);
571 }
572
573 /*
574 * Attach the generic SD/MMC bus driver. (The bus driver must
575 * not invoke any chipset functions before it is attached.)
576 */
577 memset(&saa, 0, sizeof(saa));
578 saa.saa_busname = "sdmmc";
579 saa.saa_sct = &sdhc_functions;
580 saa.saa_sch = hp;
581 saa.saa_dmat = hp->dmat;
582 saa.saa_clkmax = hp->clkbase;
583 if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_CGM))
584 saa.saa_clkmin = hp->clkbase / 256 / 2046;
585 else if (ISSET(sc->sc_flags, SDHC_FLAG_HAVE_DVS))
586 saa.saa_clkmin = hp->clkbase / 256 / 16;
587 else if (hp->sc->sc_clkmsk != 0)
588 saa.saa_clkmin = hp->clkbase / (hp->sc->sc_clkmsk >>
589 (ffs(hp->sc->sc_clkmsk) - 1));
590 else if (hp->specver >= SDHC_SPEC_VERS_300)
591 saa.saa_clkmin = hp->clkbase / 0x3ff;
592 else
593 saa.saa_clkmin = hp->clkbase / 256;
594 saa.saa_caps = SMC_CAPS_4BIT_MODE|SMC_CAPS_AUTO_STOP;
595 if (ISSET(sc->sc_flags, SDHC_FLAG_8BIT_MODE))
596 saa.saa_caps |= SMC_CAPS_8BIT_MODE;
597 if (ISSET(caps, SDHC_HIGH_SPEED_SUPP))
598 saa.saa_caps |= SMC_CAPS_SD_HIGHSPEED;
599 if (ISSET(caps2, SDHC_SDR104_SUPP))
600 saa.saa_caps |= SMC_CAPS_UHS_SDR104 |
601 SMC_CAPS_UHS_SDR50 |
602 SMC_CAPS_MMC_HS200;
603 if (ISSET(caps2, SDHC_SDR50_SUPP))
604 saa.saa_caps |= SMC_CAPS_UHS_SDR50;
605 if (ISSET(caps2, SDHC_DDR50_SUPP))
606 saa.saa_caps |= SMC_CAPS_UHS_DDR50;
607 if (ISSET(hp->flags, SHF_USE_DMA)) {
608 saa.saa_caps |= SMC_CAPS_DMA;
609 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
610 saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;
611 }
612 if (ISSET(sc->sc_flags, SDHC_FLAG_SINGLE_ONLY))
613 saa.saa_caps |= SMC_CAPS_SINGLE_ONLY;
614 if (ISSET(sc->sc_flags, SDHC_FLAG_POLL_CARD_DET))
615 saa.saa_caps |= SMC_CAPS_POLL_CARD_DET;
616 hp->sdmmc = config_found(sc->sc_dev, &saa, sdhc_cfprint);
617
618 return 0;
619
620err:
621 callout_destroy(&hp->tuning_timer);
622 cv_destroy(&hp->intr_cv);
623 mutex_destroy(&hp->intr_lock);
624 free(hp, M_DEVBUF);
625 sc->sc_host[--sc->sc_nhosts] = NULL;
626err1:
627 return 1;
628}
629
630int
631sdhc_detach(struct sdhc_softc *sc, int flags)
632{
633 struct sdhc_host *hp;
634 int rv = 0;
635
636 for (size_t n = 0; n < sc->sc_nhosts; n++) {
637 hp = sc->sc_host[n];
638 if (hp == NULL)
639 continue;
640 if (hp->sdmmc != NULL) {
641 rv = config_detach(hp->sdmmc, flags);
642 if (rv)
643 break;
644 hp->sdmmc = NULL;
645 }
646 /* disable interrupts */
647 if ((flags & DETACH_FORCE) == 0) {
648 mutex_enter(&hp->intr_lock);
649 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
650 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
651 } else {
652 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
653 }
654 sdhc_soft_reset(hp, SDHC_RESET_ALL);
655 mutex_exit(&hp->intr_lock);
656 }
657 callout_halt(&hp->tuning_timer, NULL);
658 callout_destroy(&hp->tuning_timer);
659 cv_destroy(&hp->intr_cv);
660 mutex_destroy(&hp->intr_lock);
661 if (hp->ios > 0) {
662 bus_space_unmap(hp->iot, hp->ioh, hp->ios);
663 hp->ios = 0;
664 }
665 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
666 bus_dmamap_unload(sc->sc_dmat, hp->adma_map);
667 bus_dmamap_destroy(sc->sc_dmat, hp->adma_map);
668 bus_dmamem_unmap(sc->sc_dmat, hp->adma2, PAGE_SIZE);
669 bus_dmamem_free(sc->sc_dmat, hp->adma_segs, 1);
670 }
671 free(hp, M_DEVBUF);
672 sc->sc_host[n] = NULL;
673 }
674
675 return rv;
676}
677
678bool
679sdhc_suspend(device_t dev, const pmf_qual_t *qual)
680{
681 struct sdhc_softc *sc = device_private(dev);
682 struct sdhc_host *hp;
683 size_t i;
684
685 /* XXX poll for command completion or suspend command
686 * in progress */
687
688 /* Save the host controller state. */
689 for (size_t n = 0; n < sc->sc_nhosts; n++) {
690 hp = sc->sc_host[n];
691 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
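			/*
			 * sizeof(hp->regs) is not a multiple of 4, so the
			 * last 32-bit read yields only two valid bytes; the
			 * "i + 3" check below handles that tail.
			 */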
692 for (i = 0; i < sizeof hp->regs; i += 4) {
693 uint32_t v = HREAD4(hp, i);
694 hp->regs[i + 0] = (v >> 0);
695 hp->regs[i + 1] = (v >> 8);
696 if (i + 3 < sizeof hp->regs) {
697 hp->regs[i + 2] = (v >> 16);
698 hp->regs[i + 3] = (v >> 24);
699 }
700 }
701 } else {
702 for (i = 0; i < sizeof hp->regs; i++) {
703 hp->regs[i] = HREAD1(hp, i);
704 }
705 }
706 }
707 return true;
708}
709
710bool
711sdhc_resume(device_t dev, const pmf_qual_t *qual)
712{
713 struct sdhc_softc *sc = device_private(dev);
714 struct sdhc_host *hp;
715 size_t i;
716
717 /* Restore the host controller state. */
718 for (size_t n = 0; n < sc->sc_nhosts; n++) {
719 hp = sc->sc_host[n];
720 (void)sdhc_host_reset(hp);
721 if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
722 for (i = 0; i < sizeof hp->regs; i += 4) {
723 if (i + 3 < sizeof hp->regs) {
724 HWRITE4(hp, i,
725 (hp->regs[i + 0] << 0)
726 | (hp->regs[i + 1] << 8)
727 | (hp->regs[i + 2] << 16)
728 | (hp->regs[i + 3] << 24));
729 } else {
730 HWRITE4(hp, i,
731 (hp->regs[i + 0] << 0)
732 | (hp->regs[i + 1] << 8));
733 }
734 }
735 } else {
736 for (i = 0; i < sizeof hp->regs; i++) {
737 HWRITE1(hp, i, hp->regs[i]);
738 }
739 }
740 }
741 return true;
742}
743
744bool
745sdhc_shutdown(device_t dev, int flags)
746{
747 struct sdhc_softc *sc = device_private(dev);
748 struct sdhc_host *hp;
749
750 /* XXX chip locks up if we don't disable it before reboot. */
751 for (size_t i = 0; i < sc->sc_nhosts; i++) {
752 hp = sc->sc_host[i];
753 (void)sdhc_host_reset(hp);
754 }
755 return true;
756}
757
758/*
759 * Reset the host controller. Called during initialization, when
760 * cards are removed, upon resume, and during error recovery.
761 */
762static int
763sdhc_host_reset1(sdmmc_chipset_handle_t sch)
764{
765 struct sdhc_host *hp = (struct sdhc_host *)sch;
766 uint32_t sdhcimask;
767 int error;
768
769 KASSERT(mutex_owned(&hp->intr_lock));
770
771 /* Disable all interrupts. */
772 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
773 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, 0);
774 } else {
775 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, 0);
776 }
777
778 /*
779 * Reset the entire host controller and wait up to 100ms for
780 * the controller to clear the reset bit.
781 */
782 error = sdhc_soft_reset(hp, SDHC_RESET_ALL);
783 if (error)
784 goto out;
785
786 /* Set data timeout counter value to max for now. */
787 HWRITE1(hp, SDHC_TIMEOUT_CTL, SDHC_TIMEOUT_MAX);
788#if 1
789 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
790 HWRITE4(hp, SDHC_NINTR_STATUS, SDHC_CMD_TIMEOUT_ERROR << 16);
791#endif
792
793 /* Enable interrupts. */
794 sdhcimask = SDHC_CARD_REMOVAL | SDHC_CARD_INSERTION |
795 SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY |
796 SDHC_DMA_INTERRUPT | SDHC_BLOCK_GAP_EVENT |
797 SDHC_TRANSFER_COMPLETE | SDHC_COMMAND_COMPLETE;
798 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
799 sdhcimask |= SDHC_EINTR_STATUS_MASK << 16;
800 HWRITE4(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
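		/*
		 * Derive the signal-enable mask from the status-enable
		 * mask: swap the error status bits for the error signal
		 * bits, and drop the buffer-ready bits, which the PIO
		 * path enables on demand.
		 */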
801 sdhcimask ^=
802 (SDHC_EINTR_STATUS_MASK ^ SDHC_EINTR_SIGNAL_MASK) << 16;
803 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
804 HWRITE4(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
805 } else {
806 HWRITE2(hp, SDHC_NINTR_STATUS_EN, sdhcimask);
807 HWRITE2(hp, SDHC_EINTR_STATUS_EN, SDHC_EINTR_STATUS_MASK);
808 sdhcimask ^= SDHC_BUFFER_READ_READY ^ SDHC_BUFFER_WRITE_READY;
809 HWRITE2(hp, SDHC_NINTR_SIGNAL_EN, sdhcimask);
810 HWRITE2(hp, SDHC_EINTR_SIGNAL_EN, SDHC_EINTR_SIGNAL_MASK);
811 }
812
813out:
814 return error;
815}
816
817static int
818sdhc_host_reset(sdmmc_chipset_handle_t sch)
819{
820 struct sdhc_host *hp = (struct sdhc_host *)sch;
821 int error;
822
823 mutex_enter(&hp->intr_lock);
824 error = sdhc_host_reset1(sch);
825 mutex_exit(&hp->intr_lock);
826
827 return error;
828}
829
830static uint32_t
831sdhc_host_ocr(sdmmc_chipset_handle_t sch)
832{
833 struct sdhc_host *hp = (struct sdhc_host *)sch;
834
835 return hp->ocr;
836}
837
838static int
839sdhc_host_maxblklen(sdmmc_chipset_handle_t sch)
840{
841 struct sdhc_host *hp = (struct sdhc_host *)sch;
842
843 return hp->maxblklen;
844}
845
846/*
847 * Return non-zero if the card is currently inserted.
848 */
849static int
850sdhc_card_detect(sdmmc_chipset_handle_t sch)
851{
852 struct sdhc_host *hp = (struct sdhc_host *)sch;
853 int r;
854
855 if (hp->sc->sc_vendor_card_detect)
856 return (*hp->sc->sc_vendor_card_detect)(hp->sc);
857
858 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CARD_INSERTED);
859
860 return r ? 1 : 0;
861}
862
863/*
864 * Return non-zero if the card is currently write-protected.
865 */
866static int
867sdhc_write_protect(sdmmc_chipset_handle_t sch)
868{
869 struct sdhc_host *hp = (struct sdhc_host *)sch;
870 int r;
871
872 if (hp->sc->sc_vendor_write_protect)
873 return (*hp->sc->sc_vendor_write_protect)(hp->sc);
874
875 r = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_WRITE_PROTECT_SWITCH);
876
877 return r ? 0 : 1;
878}
879
880/*
881 * Set or change SD bus voltage and enable or disable SD bus power.
882 * Return zero on success.
883 */
884static int
885sdhc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr)
886{
887 struct sdhc_host *hp = (struct sdhc_host *)sch;
888 uint8_t vdd;
889 int error = 0;
890 const uint32_t pcmask =
891 ~(SDHC_BUS_POWER | (SDHC_VOLTAGE_MASK << SDHC_VOLTAGE_SHIFT));
892
893 mutex_enter(&hp->intr_lock);
894
895 /*
896 * Disable bus power before voltage change.
897 */
898 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)
899 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_PWR0))
900 HWRITE1(hp, SDHC_POWER_CTL, 0);
901
902 /* If power is disabled, reset the host and return now. */
903 if (ocr == 0) {
904 (void)sdhc_host_reset1(hp);
905 callout_halt(&hp->tuning_timer, &hp->intr_lock);
906 goto out;
907 }
908
909 /*
910 * Select the lowest voltage according to capabilities.
911 */
912 ocr &= hp->ocr;
913 if (ISSET(ocr, MMC_OCR_1_65V_1_95V)) {
914 vdd = SDHC_VOLTAGE_1_8V;
915 } else if (ISSET(ocr, MMC_OCR_2_9V_3_0V|MMC_OCR_3_0V_3_1V)) {
916 vdd = SDHC_VOLTAGE_3_0V;
917 } else if (ISSET(ocr, MMC_OCR_3_2V_3_3V|MMC_OCR_3_3V_3_4V)) {
918 vdd = SDHC_VOLTAGE_3_3V;
919 } else {
920 /* Unsupported voltage level requested. */
921 error = EINVAL;
922 goto out;
923 }
924
925 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
926 /*
927 * Enable bus power. Wait at least 1 ms (or 74 clocks) plus
928 * voltage ramp until power rises.
929 */
930
931 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SINGLE_POWER_WRITE)) {
932 HWRITE1(hp, SDHC_POWER_CTL,
933 (vdd << SDHC_VOLTAGE_SHIFT) | SDHC_BUS_POWER);
934 } else {
935 HWRITE1(hp, SDHC_POWER_CTL,
936 HREAD1(hp, SDHC_POWER_CTL) & pcmask);
937 sdmmc_delay(1);
938 HWRITE1(hp, SDHC_POWER_CTL,
939 (vdd << SDHC_VOLTAGE_SHIFT));
940 sdmmc_delay(1);
941 HSET1(hp, SDHC_POWER_CTL, SDHC_BUS_POWER);
942 sdmmc_delay(10000);
943 }
944
945 /*
946		 * The host system may decline to power the bus (e.g. when
947		 * the battery is low).  In that case the host controller
948		 * should clear the bus power bit.
949 */
950 if (!ISSET(HREAD1(hp, SDHC_POWER_CTL), SDHC_BUS_POWER)) {
951 error = ENXIO;
952 goto out;
953 }
954 }
955
956out:
957 mutex_exit(&hp->intr_lock);
958
959 return error;
960}
961
962/*
963 * Return the smallest possible base clock frequency divisor value
964 * for the CLOCK_CTL register to produce `freq' (KHz).
965 */
966static bool
967sdhc_clock_divisor(struct sdhc_host *hp, u_int freq, u_int *divp)
968{
969 u_int div;
970
971 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_CGM)) {
972 for (div = hp->clkbase / freq; div <= 0x3ff; div++) {
973 if ((hp->clkbase / div) <= freq) {
974 *divp = SDHC_SDCLK_CGM
975 | ((div & 0x300) << SDHC_SDCLK_XDIV_SHIFT)
976 | ((div & 0x0ff) << SDHC_SDCLK_DIV_SHIFT);
977 //freq = hp->clkbase / div;
978 return true;
979 }
980 }
981 /* No divisor found. */
982 return false;
983 }
984 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_HAVE_DVS)) {
985 u_int dvs = (hp->clkbase + freq - 1) / freq;
986 u_int roundup = dvs & 1;
987 for (dvs >>= 1, div = 1; div <= 256; div <<= 1, dvs >>= 1) {
988 if (dvs + roundup <= 16) {
989 dvs += roundup - 1;
990 *divp = (div << SDHC_SDCLK_DIV_SHIFT)
991 | (dvs << SDHC_SDCLK_DVS_SHIFT);
992 DPRINTF(2,
993 ("%s: divisor for freq %u is %u * %u\n",
994 HDEVNAME(hp), freq, div * 2, dvs + 1));
995 //freq = hp->clkbase / (div * 2) * (dvs + 1);
996 return true;
997 }
998 /*
999 * If we drop bits, we need to round up the divisor.
1000 */
1001 roundup |= dvs & 1;
1002 }
1003 /* No divisor found. */
1004 return false;
1005 }
1006 if (hp->sc->sc_clkmsk != 0) {
1007 div = howmany(hp->clkbase, freq);
1008 if (div > (hp->sc->sc_clkmsk >> (ffs(hp->sc->sc_clkmsk) - 1)))
1009 return false;
1010 *divp = div << (ffs(hp->sc->sc_clkmsk) - 1);
1011 //freq = hp->clkbase / div;
1012 return true;
1013 }
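	/*
	 * SDHC 3.0 uses a 10-bit divisor N giving SDCLK = clkbase / (2N),
	 * with N == 0 selecting the undivided base clock; split N into the
	 * classic 8-bit field plus the two extended bits.
	 */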
1014 if (hp->specver >= SDHC_SPEC_VERS_300) {
1015 div = howmany(hp->clkbase, freq);
1016 div = div > 1 ? howmany(div, 2) : 0;
1017 if (div > 0x3ff)
1018 return false;
1019 *divp = (((div >> 8) & SDHC_SDCLK_XDIV_MASK)
1020 << SDHC_SDCLK_XDIV_SHIFT) |
1021 (((div >> 0) & SDHC_SDCLK_DIV_MASK)
1022 << SDHC_SDCLK_DIV_SHIFT);
1023 //freq = hp->clkbase / (div ? div * 2 : 1);
1024 return true;
1025 } else {
1026 for (div = 1; div <= 256; div *= 2) {
1027 if ((hp->clkbase / div) <= freq) {
1028 *divp = (div / 2) << SDHC_SDCLK_DIV_SHIFT;
1029 //freq = hp->clkbase / div;
1030 return true;
1031 }
1032 }
1033 /* No divisor found. */
1034 return false;
1035 }
1036 /* No divisor found. */
1037 return false;
1038}
1039
1040/*
1041 * Set or change SDCLK frequency or disable the SD clock.
1042 * Return zero on success.
1043 */
1044static int
1045sdhc_bus_clock_ddr(sdmmc_chipset_handle_t sch, int freq, bool ddr)
1046{
1047 struct sdhc_host *hp = (struct sdhc_host *)sch;
1048 u_int div;
1049 u_int timo;
1050 int16_t reg;
1051 int error = 0;
1052 bool present __diagused;
1053
1054 mutex_enter(&hp->intr_lock);
1055
1056#ifdef DIAGNOSTIC
1057 present = ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_CMD_INHIBIT_MASK);
1058
1059 /* Must not stop the clock if commands are in progress. */
1060 if (present && sdhc_card_detect(hp)) {
1061 aprint_normal_dev(hp->sc->sc_dev,
1062 "%s: command in progress\n", __func__);
1063 }
1064#endif
1065
1066 if (hp->sc->sc_vendor_bus_clock) {
1067 error = (*hp->sc->sc_vendor_bus_clock)(hp->sc, freq);
1068 if (error != 0)
1069 goto out;
1070 }
1071
1072 /*
1073 * Stop SD clock before changing the frequency.
1074 */
1075 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1076 HCLR4(hp, SDHC_VEND_SPEC,
1077 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1078 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1079 if (freq == SDMMC_SDCLK_OFF) {
1080 goto out;
1081 }
1082 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1083 HCLR4(hp, SDHC_CLOCK_CTL, 0xfff8);
1084 if (freq == SDMMC_SDCLK_OFF) {
1085 HSET4(hp, SDHC_CLOCK_CTL, 0x80f0);
1086 goto out;
1087 }
1088 } else {
1089 HCLR2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1090 if (freq == SDMMC_SDCLK_OFF)
1091 goto out;
1092 }
1093
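	/*
	 * Select the timing mode for the target clock: DDR is set via
	 * MIX_CTRL on uSDHC; otherwise program the UHS mode field in
	 * HOST_CTL2 (SDR104 above 100 MHz, SDR50 above 50 MHz,
	 * DDR50/SDR25 above 25 MHz, SDR12 above 400 kHz).
	 */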
1094 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1095 if (ddr)
1096 HSET4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1097 else
1098 HCLR4(hp, SDHC_MIX_CTRL, SDHC_USDHC_DDR_EN);
1099 } else if (hp->specver >= SDHC_SPEC_VERS_300) {
1100 HCLR2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_MASK);
1101 if (freq > 100000) {
1102 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR104);
1103 } else if (freq > 50000) {
1104 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR50);
1105 } else if (freq > 25000) {
1106 if (ddr) {
1107 HSET2(hp, SDHC_HOST_CTL2,
1108 SDHC_UHS_MODE_SELECT_DDR50);
1109 } else {
1110 HSET2(hp, SDHC_HOST_CTL2,
1111 SDHC_UHS_MODE_SELECT_SDR25);
1112 }
1113 } else if (freq > 400) {
1114 HSET2(hp, SDHC_HOST_CTL2, SDHC_UHS_MODE_SELECT_SDR12);
1115 }
1116 }
1117
1118 /*
1119	 * Slow down the Ricoh 5U823 controller, which isn't reliable
1120	 * at a 100 MHz bus clock.
1121 */
1122 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_SLOW_SDR50)) {
1123 if (freq == 100000)
1124 --freq;
1125 }
1126
1127 /*
1128 * Set the minimum base clock frequency divisor.
1129 */
1130 if (!sdhc_clock_divisor(hp, freq, &div)) {
1131 /* Invalid base clock frequency or `freq' value. */
1132 aprint_error_dev(hp->sc->sc_dev,
1133 "Invalid bus clock %d kHz\n", freq);
1134 error = EINVAL;
1135 goto out;
1136 }
1137 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1138 if (ddr) {
1139 /* in ddr mode, divisor >>= 1 */
1140 div = ((div >> 1) & (SDHC_SDCLK_DIV_MASK <<
1141 SDHC_SDCLK_DIV_SHIFT)) |
1142 (div & (SDHC_SDCLK_DVS_MASK <<
1143 SDHC_SDCLK_DVS_SHIFT));
1144 }
1145 for (timo = 1000; timo > 0; timo--) {
1146 if (ISSET(HREAD4(hp, SDHC_PRESENT_STATE), SDHC_SDSTB))
1147 break;
1148 sdmmc_delay(10);
1149 }
1150 HWRITE4(hp, SDHC_CLOCK_CTL,
1151 div | (SDHC_TIMEOUT_MAX << 16) | 0x0f);
1152 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1153 HWRITE4(hp, SDHC_CLOCK_CTL,
1154 div | (SDHC_TIMEOUT_MAX << 16));
1155 } else {
1156 reg = HREAD2(hp, SDHC_CLOCK_CTL);
1157 reg &= (SDHC_INTCLK_STABLE | SDHC_INTCLK_ENABLE);
1158 HWRITE2(hp, SDHC_CLOCK_CTL, reg | div);
1159 }
1160
1161 /*
1162 * Start internal clock. Wait 10ms for stabilization.
1163 */
1164 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1165 HSET4(hp, SDHC_VEND_SPEC,
1166 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1167 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1168 } else if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1169 sdmmc_delay(10000);
1170 HSET4(hp, SDHC_CLOCK_CTL,
1171 8 | SDHC_INTCLK_ENABLE | SDHC_INTCLK_STABLE);
1172 } else {
1173 HSET2(hp, SDHC_CLOCK_CTL, SDHC_INTCLK_ENABLE);
1174 for (timo = 1000; timo > 0; timo--) {
1175 if (ISSET(HREAD2(hp, SDHC_CLOCK_CTL),
1176 SDHC_INTCLK_STABLE))
1177 break;
1178 sdmmc_delay(10);
1179 }
1180 if (timo == 0) {
1181 error = ETIMEDOUT;
1182 DPRINTF(1,("%s: timeout\n", __func__));
1183 goto out;
1184 }
1185 }
1186
1187 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1188 HSET1(hp, SDHC_SOFTWARE_RESET, SDHC_INIT_ACTIVE);
1189 /*
1190 * Sending 80 clocks at 400kHz takes 200us.
1191 * So delay for that time + slop and then
1192 * check a few times for completion.
1193 */
1194 sdmmc_delay(210);
1195 for (timo = 10; timo > 0; timo--) {
1196 if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET),
1197 SDHC_INIT_ACTIVE))
1198 break;
1199 sdmmc_delay(10);
1200 }
1201 DPRINTF(2,("%s: %u init spins\n", __func__, 10 - timo));
1202
1203 /*
1204 * Enable SD clock.
1205 */
1206 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1207 HSET4(hp, SDHC_VEND_SPEC,
1208 SDHC_VEND_SPEC_CARD_CLK_SOFT_EN |
1209 SDHC_VEND_SPEC_FRC_SDCLK_ON);
1210 } else {
1211 HSET4(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1212 }
1213 } else {
1214 /*
1215 * Enable SD clock.
1216 */
1217 HSET2(hp, SDHC_CLOCK_CTL, SDHC_SDCLK_ENABLE);
1218
1219 if (freq > 25000 &&
1220 !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_HS_BIT))
1221 HSET1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1222 else
1223 HCLR1(hp, SDHC_HOST_CTL, SDHC_HIGH_SPEED);
1224 }
1225
1226out:
1227 mutex_exit(&hp->intr_lock);
1228
1229 return error;
1230}
1231
1232static int
1233sdhc_bus_width(sdmmc_chipset_handle_t sch, int width)
1234{
1235 struct sdhc_host *hp = (struct sdhc_host *)sch;
1236 int reg;
1237
1238 switch (width) {
1239 case 1:
1240 case 4:
1241 break;
1242
1243 case 8:
1244 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_8BIT_MODE))
1245 break;
1246 /* FALLTHROUGH */
1247 default:
1248 DPRINTF(0,("%s: unsupported bus width (%d)\n",
1249 HDEVNAME(hp), width));
1250 return 1;
1251 }
1252
1253 if (hp->sc->sc_vendor_bus_width) {
1254 const int error = hp->sc->sc_vendor_bus_width(hp->sc, width);
1255 if (error != 0)
1256 return error;
1257 }
1258
1259 mutex_enter(&hp->intr_lock);
1260
1261 reg = HREAD1(hp, SDHC_HOST_CTL);
1262 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1263 reg &= ~(SDHC_4BIT_MODE|SDHC_ESDHC_8BIT_MODE);
1264 if (width == 4)
1265 reg |= SDHC_4BIT_MODE;
1266 else if (width == 8)
1267 reg |= SDHC_ESDHC_8BIT_MODE;
1268 } else {
1269 reg &= ~SDHC_4BIT_MODE;
1270 if (hp->specver >= SDHC_SPEC_VERS_300) {
1271 reg &= ~SDHC_8BIT_MODE;
1272 }
1273 if (width == 4) {
1274 reg |= SDHC_4BIT_MODE;
1275 } else if (width == 8 && hp->specver >= SDHC_SPEC_VERS_300) {
1276 reg |= SDHC_8BIT_MODE;
1277 }
1278 }
1279 HWRITE1(hp, SDHC_HOST_CTL, reg);
1280
1281 mutex_exit(&hp->intr_lock);
1282
1283 return 0;
1284}
1285
1286static int
1287sdhc_bus_rod(sdmmc_chipset_handle_t sch, int on)
1288{
1289 struct sdhc_host *hp = (struct sdhc_host *)sch;
1290
1291 if (hp->sc->sc_vendor_rod)
1292 return (*hp->sc->sc_vendor_rod)(hp->sc, on);
1293
1294 return 0;
1295}
1296
1297static void
1298sdhc_card_enable_intr(sdmmc_chipset_handle_t sch, int enable)
1299{
1300 struct sdhc_host *hp = (struct sdhc_host *)sch;
1301
1302 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1303 mutex_enter(&hp->intr_lock);
1304 if (enable) {
1305 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1306 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1307 } else {
1308 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_CARD_INTERRUPT);
1309 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1310 }
1311 mutex_exit(&hp->intr_lock);
1312 }
1313}
1314
1315static void
1316sdhc_card_intr_ack(sdmmc_chipset_handle_t sch)
1317{
1318 struct sdhc_host *hp = (struct sdhc_host *)sch;
1319
1320 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1321 mutex_enter(&hp->intr_lock);
1322 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
1323 mutex_exit(&hp->intr_lock);
1324 }
1325}
1326
1327static int
1328sdhc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
1329{
1330 struct sdhc_host *hp = (struct sdhc_host *)sch;
1331
1332 mutex_enter(&hp->intr_lock);
1333 switch (signal_voltage) {
1334 case SDMMC_SIGNAL_VOLTAGE_180:
1335 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1336 HSET2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1337 break;
1338 case SDMMC_SIGNAL_VOLTAGE_330:
1339 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC))
1340 HCLR2(hp, SDHC_HOST_CTL2, SDHC_1_8V_SIGNAL_EN);
1341 break;
1342 default:
1343 return EINVAL;
1344 }
1345 mutex_exit(&hp->intr_lock);
1346
1347 return 0;
1348}
1349
1350/*
1351 * Sampling clock tuning procedure (UHS)
1352 */
1353static int
1354sdhc_execute_tuning1(struct sdhc_host *hp, int timing)
1355{
1356 struct sdmmc_command cmd;
1357 uint8_t hostctl;
1358 int opcode, error, retry = 40;
1359
1360 KASSERT(mutex_owned(&hp->intr_lock));
1361
1362 hp->tuning_timing = timing;
1363
1364 switch (timing) {
1365 case SDMMC_TIMING_MMC_HS200:
1366 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1367 break;
1368 case SDMMC_TIMING_UHS_SDR50:
1369 if (!ISSET(hp->sc->sc_caps2, SDHC_TUNING_SDR50))
1370 return 0;
1371 /* FALLTHROUGH */
1372 case SDMMC_TIMING_UHS_SDR104:
1373 opcode = MMC_SEND_TUNING_BLOCK;
1374 break;
1375 default:
1376 return EINVAL;
1377 }
1378
1379 hostctl = HREAD1(hp, SDHC_HOST_CTL);
1380
1381 /* enable buffer read ready interrupt */
1382 HSET2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1383 HSET2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1384
1385 /* disable DMA */
1386 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1387
1388 /* reset tuning circuit */
1389 HCLR2(hp, SDHC_HOST_CTL2, SDHC_SAMPLING_CLOCK_SEL);
1390
1391 /* start of tuning */
1392 HWRITE2(hp, SDHC_HOST_CTL2, SDHC_EXECUTE_TUNING);
1393
1394 do {
1395 memset(&cmd, 0, sizeof(cmd));
1396 cmd.c_opcode = opcode;
1397 cmd.c_arg = 0;
1398 cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
1399 if (ISSET(hostctl, SDHC_8BIT_MODE)) {
1400 cmd.c_blklen = cmd.c_datalen = 128;
1401 } else {
1402 cmd.c_blklen = cmd.c_datalen = 64;
1403 }
1404
1405 error = sdhc_start_command(hp, &cmd);
1406 if (error)
1407 break;
1408
1409 if (!sdhc_wait_intr(hp, SDHC_BUFFER_READ_READY,
1410 SDHC_TUNING_TIMEOUT, false)) {
1411 break;
1412 }
1413
1414 delay(1000);
1415 } while (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING && --retry);
1416
1417 /* disable buffer read ready interrupt */
1418 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, SDHC_BUFFER_READ_READY);
1419 HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_BUFFER_READ_READY);
1420
1421 if (HREAD2(hp, SDHC_HOST_CTL2) & SDHC_EXECUTE_TUNING) {
1422 HCLR2(hp, SDHC_HOST_CTL2,
1423 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1424 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1425 aprint_error_dev(hp->sc->sc_dev,
1426 "tuning did not complete, using fixed sampling clock\n");
1427 return EIO; /* tuning did not complete */
1428 }
1429
1430 if ((HREAD2(hp, SDHC_HOST_CTL2) & SDHC_SAMPLING_CLOCK_SEL) == 0) {
1431 HCLR2(hp, SDHC_HOST_CTL2,
1432 SDHC_SAMPLING_CLOCK_SEL|SDHC_EXECUTE_TUNING);
1433 sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1434 aprint_error_dev(hp->sc->sc_dev,
1435 "tuning failed, using fixed sampling clock\n");
1436 return EIO; /* tuning failed */
1437 }
1438
1439 if (hp->tuning_timer_count) {
1440 callout_schedule(&hp->tuning_timer,
1441 hz * hp->tuning_timer_count);
1442 }
1443
1444 return 0; /* tuning completed */
1445}
1446
1447static int
1448sdhc_execute_tuning(sdmmc_chipset_handle_t sch, int timing)
1449{
1450 struct sdhc_host *hp = (struct sdhc_host *)sch;
1451 int error;
1452
1453 mutex_enter(&hp->intr_lock);
1454 error = sdhc_execute_tuning1(hp, timing);
1455 mutex_exit(&hp->intr_lock);
1456 return error;
1457}
1458
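/*
 * Re-tuning timer callout: only mark re-tuning as pending here; the
 * tuning sequence itself runs at the start of the next command, from
 * sdhc_exec_command().
 */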
1459static void
1460sdhc_tuning_timer(void *arg)
1461{
1462 struct sdhc_host *hp = arg;
1463
1464 atomic_swap_uint(&hp->tuning_timer_pending, 1);
1465}
1466
1467static int
1468sdhc_wait_state(struct sdhc_host *hp, uint32_t mask, uint32_t value)
1469{
1470 uint32_t state;
1471 int timeout;
1472
1473 for (timeout = 10000; timeout > 0; timeout--) {
1474 if (((state = HREAD4(hp, SDHC_PRESENT_STATE)) & mask) == value)
1475 return 0;
1476 sdmmc_delay(10);
1477 }
1478 aprint_error_dev(hp->sc->sc_dev, "timeout waiting for mask %#x value %#x (state=%#x)\n",
1479 mask, value, state);
1480 return ETIMEDOUT;
1481}
1482
1483static void
1484sdhc_exec_command(sdmmc_chipset_handle_t sch, struct sdmmc_command *cmd)
1485{
1486 struct sdhc_host *hp = (struct sdhc_host *)sch;
1487 int error;
1488 bool probing;
1489
1490 mutex_enter(&hp->intr_lock);
1491
1492 if (atomic_cas_uint(&hp->tuning_timer_pending, 1, 0) == 1) {
1493 (void)sdhc_execute_tuning1(hp, hp->tuning_timing);
1494 }
1495
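	/*
	 * On eSDHC/uSDHC, enable the buffer-ready interrupts when the host
	 * will transfer data by PIO, and mask them when it uses DMA.
	 */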
1496 if (cmd->c_data &&
1497 ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1498 const uint16_t ready = SDHC_BUFFER_READ_READY | SDHC_BUFFER_WRITE_READY;
1499 if (ISSET(hp->flags, SHF_USE_DMA)) {
1500 HCLR2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1501 HCLR2(hp, SDHC_NINTR_STATUS_EN, ready);
1502 } else {
1503 HSET2(hp, SDHC_NINTR_SIGNAL_EN, ready);
1504 HSET2(hp, SDHC_NINTR_STATUS_EN, ready);
1505 }
1506 }
1507
1508 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_TIMEOUT)) {
1509 const uint16_t eintr = SDHC_CMD_TIMEOUT_ERROR;
1510 if (cmd->c_data != NULL) {
1511 HCLR2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1512 HCLR2(hp, SDHC_EINTR_STATUS_EN, eintr);
1513 } else {
1514 HSET2(hp, SDHC_EINTR_SIGNAL_EN, eintr);
1515 HSET2(hp, SDHC_EINTR_STATUS_EN, eintr);
1516 }
1517 }
1518
1519 /*
1520 * Start the MMC command, or mark `cmd' as failed and return.
1521 */
1522 error = sdhc_start_command(hp, cmd);
1523 if (error) {
1524 cmd->c_error = error;
1525 goto out;
1526 }
1527
1528 /*
1529 * Wait until the command phase is done, or until the command
1530 * is marked done for any other reason.
1531 */
1532 probing = (cmd->c_flags & SCF_TOUT_OK) != 0;
1533 if (!sdhc_wait_intr(hp, SDHC_COMMAND_COMPLETE, SDHC_COMMAND_TIMEOUT, probing)) {
1534 DPRINTF(1,("%s: timeout for command\n", __func__));
1535 sdmmc_delay(50);
1536 cmd->c_error = ETIMEDOUT;
1537 goto out;
1538 }
1539
1540 /*
1541 * The host controller removes bits [0:7] from the response
1542 * data (CRC) and we pass the data up unchanged to the bus
1543 * driver (without padding).
1544 */
1545 if (cmd->c_error == 0 && ISSET(cmd->c_flags, SCF_RSP_PRESENT)) {
1546 cmd->c_resp[0] = HREAD4(hp, SDHC_RESPONSE + 0);
1547 if (ISSET(cmd->c_flags, SCF_RSP_136)) {
1548 cmd->c_resp[1] = HREAD4(hp, SDHC_RESPONSE + 4);
1549 cmd->c_resp[2] = HREAD4(hp, SDHC_RESPONSE + 8);
1550 cmd->c_resp[3] = HREAD4(hp, SDHC_RESPONSE + 12);
1551 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_RSP136_CRC)) {
1552 cmd->c_resp[0] = (cmd->c_resp[0] >> 8) |
1553 (cmd->c_resp[1] << 24);
1554 cmd->c_resp[1] = (cmd->c_resp[1] >> 8) |
1555 (cmd->c_resp[2] << 24);
1556 cmd->c_resp[2] = (cmd->c_resp[2] >> 8) |
1557 (cmd->c_resp[3] << 24);
1558 cmd->c_resp[3] = (cmd->c_resp[3] >> 8);
1559 }
1560 }
1561 }
1562 DPRINTF(1,("%s: resp = %08x\n", HDEVNAME(hp), cmd->c_resp[0]));
1563
1564 /*
1565 * If the command has data to transfer in any direction,
1566 * execute the transfer now.
1567 */
1568 if (cmd->c_error == 0 && cmd->c_data != NULL)
1569 sdhc_transfer_data(hp, cmd);
1570 else if (ISSET(cmd->c_flags, SCF_RSP_BSY)) {
1571 if (!sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE, hz * 10, false)) {
1572 DPRINTF(1,("%s: sdhc_exec_command: RSP_BSY\n",
1573 HDEVNAME(hp)));
1574 cmd->c_error = ETIMEDOUT;
1575 goto out;
1576 }
1577 }
1578
1579out:
1580 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)
1581 && !ISSET(hp->sc->sc_flags, SDHC_FLAG_NO_LED_ON)) {
1582 /* Turn off the LED. */
1583 HCLR1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1584 }
1585 SET(cmd->c_flags, SCF_ITSDONE);
1586
1587 mutex_exit(&hp->intr_lock);
1588
1589 DPRINTF(1,("%s: cmd %d %s (flags=%08x error=%d)\n", HDEVNAME(hp),
1590 cmd->c_opcode, (cmd->c_error == 0) ? "done" : "abort",
1591 cmd->c_flags, cmd->c_error));
1592}
1593
1594static int
1595sdhc_start_command(struct sdhc_host *hp, struct sdmmc_command *cmd)
1596{
1597 struct sdhc_softc * const sc = hp->sc;
1598 uint16_t blksize = 0;
1599 uint16_t blkcount = 0;
1600 uint16_t mode;
1601 uint16_t command;
1602 uint32_t pmask;
1603 int error;
1604
1605 KASSERT(mutex_owned(&hp->intr_lock));
1606
1607 DPRINTF(1,("%s: start cmd %d arg=%08x data=%p dlen=%d flags=%08x, status=%#x\n",
1608 HDEVNAME(hp), cmd->c_opcode, cmd->c_arg, cmd->c_data,
1609 cmd->c_datalen, cmd->c_flags, HREAD4(hp, SDHC_NINTR_STATUS)));
1610
1611 /*
1612 * The maximum block length for commands should be the minimum
1613 * of the host buffer size and the card buffer size. (1.7.2)
1614 */
1615
1616 /* Fragment the data into proper blocks. */
1617 if (cmd->c_datalen > 0) {
1618 blksize = MIN(cmd->c_datalen, cmd->c_blklen);
1619 blkcount = cmd->c_datalen / blksize;
1620 if (cmd->c_datalen % blksize > 0) {
1621 /* XXX: Split this command. (1.7.4) */
1622 aprint_error_dev(sc->sc_dev,
1623 "data not a multiple of %u bytes\n", blksize);
1624 return EINVAL;
1625 }
1626 }
1627
1628 /* Check limit imposed by 9-bit block count. (1.7.2) */
1629 if (blkcount > SDHC_BLOCK_COUNT_MAX) {
1630 aprint_error_dev(sc->sc_dev, "too much data\n");
1631 return EINVAL;
1632 }
1633
1634 /* Prepare transfer mode register value. (2.2.5) */
1635 mode = SDHC_BLOCK_COUNT_ENABLE;
1636 if (ISSET(cmd->c_flags, SCF_CMD_READ))
1637 mode |= SDHC_READ_MODE;
1638 if (blkcount > 1) {
1639 mode |= SDHC_MULTI_BLOCK_MODE;
1640 /* XXX only for memory commands? */
1641 mode |= SDHC_AUTO_CMD12_ENABLE;
1642 }
1643 if (cmd->c_dmamap != NULL && cmd->c_datalen > 0 &&
1644 ISSET(hp->flags, SHF_MODE_DMAEN)) {
1645 mode |= SDHC_DMA_ENABLE;
1646 }
1647
1648 /*
1649 * Prepare command register value. (2.2.6)
1650 */
1651 command = (cmd->c_opcode & SDHC_COMMAND_INDEX_MASK) << SDHC_COMMAND_INDEX_SHIFT;
1652
1653 if (ISSET(cmd->c_flags, SCF_RSP_CRC))
1654 command |= SDHC_CRC_CHECK_ENABLE;
1655 if (ISSET(cmd->c_flags, SCF_RSP_IDX))
1656 command |= SDHC_INDEX_CHECK_ENABLE;
1657 if (cmd->c_datalen > 0)
1658 command |= SDHC_DATA_PRESENT_SELECT;
1659
1660 if (!ISSET(cmd->c_flags, SCF_RSP_PRESENT))
1661 command |= SDHC_NO_RESPONSE;
1662 else if (ISSET(cmd->c_flags, SCF_RSP_136))
1663 command |= SDHC_RESP_LEN_136;
1664 else if (ISSET(cmd->c_flags, SCF_RSP_BSY))
1665 command |= SDHC_RESP_LEN_48_CHK_BUSY;
1666 else
1667 command |= SDHC_RESP_LEN_48;
1668
1669 /* Wait until command and optionally data inhibit bits are clear. (1.5) */
1670 pmask = SDHC_CMD_INHIBIT_CMD;
1671 if (cmd->c_flags & (SCF_CMD_ADTC|SCF_RSP_BSY))
1672 pmask |= SDHC_CMD_INHIBIT_DAT;
1673 error = sdhc_wait_state(hp, pmask, 0);
1674 if (error) {
1675 (void) sdhc_soft_reset(hp, SDHC_RESET_DAT|SDHC_RESET_CMD);
1676 device_printf(sc->sc_dev, "command or data phase inhibited\n");
1677 return error;
1678 }
1679
1680 DPRINTF(1,("%s: writing cmd: blksize=%d blkcnt=%d mode=%04x cmd=%04x\n",
1681 HDEVNAME(hp), blksize, blkcount, mode, command));
1682
1683 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
1684 blksize |= (MAX(0, PAGE_SHIFT - 12) & SDHC_DMA_BOUNDARY_MASK) <<
1685 SDHC_DMA_BOUNDARY_SHIFT; /* PAGE_SIZE DMA boundary */
1686 }
1687
1688 if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
1689 /* Alert the user not to remove the card. */
1690 HSET1(hp, SDHC_HOST_CTL, SDHC_LED_ON);
1691 }
1692
1693	/* Set up the DMA engine: ADMA2 descriptor table or SDMA start address. */
1694 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK) && cmd->c_data != NULL) {
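		/*
		 * Build one ADMA2 descriptor per DMA segment; a length
		 * field of 0 encodes a 64 KB transfer, and the final
		 * descriptor carries the END attribute.
		 */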
1695 for (int seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
1696 bus_addr_t paddr =
1697 cmd->c_dmamap->dm_segs[seg].ds_addr;
1698 uint16_t len =
1699 cmd->c_dmamap->dm_segs[seg].ds_len == 65536 ?
1700 0 : cmd->c_dmamap->dm_segs[seg].ds_len;
1701 uint16_t attr =
1702 SDHC_ADMA2_VALID | SDHC_ADMA2_ACT_TRANS;
1703 if (seg == cmd->c_dmamap->dm_nsegs - 1) {
1704 attr |= SDHC_ADMA2_END;
1705 }
1706 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1707 struct sdhc_adma2_descriptor32 *desc =
1708 hp->adma2;
1709 desc[seg].attribute = htole16(attr);
1710 desc[seg].length = htole16(len);
1711 desc[seg].address = htole32(paddr);
1712 } else {
1713 struct sdhc_adma2_descriptor64 *desc =
1714 hp->adma2;
1715 desc[seg].attribute = htole16(attr);
1716 desc[seg].length = htole16(len);
1717 desc[seg].address = htole32(paddr & 0xffffffff);
1718 desc[seg].address_hi = htole32(
1719 (uint64_t)paddr >> 32);
1720 }
1721 }
1722 if (ISSET(hp->flags, SHF_USE_ADMA2_32)) {
1723 struct sdhc_adma2_descriptor32 *desc = hp->adma2;
1724 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1725 } else {
1726 struct sdhc_adma2_descriptor64 *desc = hp->adma2;
1727 desc[cmd->c_dmamap->dm_nsegs].attribute = htole16(0);
1728 }
1729 bus_dmamap_sync(sc->sc_dmat, hp->adma_map, 0, PAGE_SIZE,
1730 BUS_DMASYNC_PREWRITE);
1731 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1732 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1733 HSET4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT_ADMA2);
1734 } else {
1735 HCLR1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT);
1736 HSET1(hp, SDHC_HOST_CTL, SDHC_DMA_SELECT_ADMA2);
1737 }
1738
1739 const bus_addr_t desc_addr = hp->adma_map->dm_segs[0].ds_addr;
1740
1741 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR, desc_addr & 0xffffffff);
1742 if (ISSET(hp->flags, SHF_USE_ADMA2_64)) {
1743 HWRITE4(hp, SDHC_ADMA_SYSTEM_ADDR + 4,
1744 (uint64_t)desc_addr >> 32);
1745 }
1746 } else if (ISSET(mode, SDHC_DMA_ENABLE) &&
1747 !ISSET(sc->sc_flags, SDHC_FLAG_EXTERNAL_DMA)) {
1748 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1749 HCLR4(hp, SDHC_HOST_CTL, SDHC_USDHC_DMA_SELECT);
1750 }
1751 HWRITE4(hp, SDHC_DMA_ADDR, cmd->c_dmamap->dm_segs[0].ds_addr);
1752 }
1753
1754 /*
1755 * Start a CPU data transfer. Writing to the high order byte
1756 * of the SDHC_COMMAND register triggers the SD command. (1.5)
1757 */
1758 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
1759 HWRITE4(hp, SDHC_BLOCK_SIZE, blksize | (blkcount << 16));
1760 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1761 if (ISSET(hp->sc->sc_flags, SDHC_FLAG_USDHC)) {
1762			/* the mode bits live in the MIX_CTRL register on uSDHC */
1763 HWRITE4(hp, SDHC_MIX_CTRL, mode |
1764 (HREAD4(hp, SDHC_MIX_CTRL) &
1765 ~(SDHC_MULTI_BLOCK_MODE |
1766 SDHC_READ_MODE |
1767 SDHC_AUTO_CMD12_ENABLE |
1768 SDHC_BLOCK_COUNT_ENABLE |
1769 SDHC_DMA_ENABLE)));
1770 HWRITE4(hp, SDHC_TRANSFER_MODE, command << 16);
1771 } else {
1772 HWRITE4(hp, SDHC_TRANSFER_MODE, mode | (command << 16));
1773 }
1774 } else {
1775 HWRITE2(hp, SDHC_BLOCK_SIZE, blksize);
1776 HWRITE2(hp, SDHC_BLOCK_COUNT, blkcount);
1777 HWRITE4(hp, SDHC_ARGUMENT, cmd->c_arg);
1778 HWRITE2(hp, SDHC_TRANSFER_MODE, mode);
1779 HWRITE2(hp, SDHC_COMMAND, command);
1780 }
1781
1782 return 0;
1783}
1784
1785static void
1786sdhc_transfer_data(struct sdhc_host *hp, struct sdmmc_command *cmd)
1787{
1788 struct sdhc_softc *sc = hp->sc;
1789 int error;
1790
1791 KASSERT(mutex_owned(&hp->intr_lock));
1792
1793 DPRINTF(1,("%s: data transfer: resp=%08x datalen=%u\n", HDEVNAME(hp),
1794 MMC_R1(cmd->c_resp), cmd->c_datalen));
1795
1796#ifdef SDHC_DEBUG
1797 /* XXX I forgot why I wanted to know when this happens :-( */
1798 if ((cmd->c_opcode == 52 || cmd->c_opcode == 53) &&
1799 ISSET(MMC_R1(cmd->c_resp), 0xcb00)) {
1800 aprint_error_dev(hp->sc->sc_dev,
1801 "CMD52/53 error response flags %#x\n",
1802 MMC_R1(cmd->c_resp) & 0xff00);
1803 }
1804#endif
1805
1806 if (cmd->c_dmamap != NULL) {
1807 if (hp->sc->sc_vendor_transfer_data_dma != NULL) {
1808 error = hp->sc->sc_vendor_transfer_data_dma(sc, cmd);
1809 if (error == 0 && !sdhc_wait_intr(hp,
1810 SDHC_TRANSFER_COMPLETE, SDHC_DMA_TIMEOUT, false)) {
1811 DPRINTF(1,("%s: timeout\n", __func__));
1812 error = ETIMEDOUT;
1813 }
1814 } else {
1815 error = sdhc_transfer_data_dma(hp, cmd);
1816 }
1817 } else
1818 error = sdhc_transfer_data_pio(hp, cmd);
1819 if (error)
1820 cmd->c_error = error;
1821 SET(cmd->c_flags, SCF_ITSDONE);
1822
1823 DPRINTF(1,("%s: data transfer done (error=%d)\n",
1824 HDEVNAME(hp), cmd->c_error));
1825}
1826
1827static int
1828sdhc_transfer_data_dma(struct sdhc_host *hp, struct sdmmc_command *cmd)
1829{
1830 bus_dma_segment_t *dm_segs = cmd->c_dmamap->dm_segs;
1831 bus_addr_t posaddr;
1832 bus_addr_t segaddr;
1833 bus_size_t seglen;
1834 u_int seg = 0;
1835 int error = 0;
1836 int status;
1837
1838 KASSERT(mutex_owned(&hp->intr_lock));
1839 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_DMA_INTERRUPT);
1840 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_DMA_INTERRUPT);
1841 KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
1842 KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);
1843
1844 for (;;) {
1845 status = sdhc_wait_intr(hp,
1846 SDHC_DMA_INTERRUPT|SDHC_TRANSFER_COMPLETE,
1847 SDHC_DMA_TIMEOUT, false);
1848
1849 if (status & SDHC_TRANSFER_COMPLETE) {
1850 break;
1851 }
1852 if (!status) {
1853 DPRINTF(1,("%s: timeout\n", __func__));
1854 error = ETIMEDOUT;
1855 break;
1856 }
1857
1858 if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
1859 continue;
1860 }
1861
1862 if ((status & SDHC_DMA_INTERRUPT) == 0) {
1863 continue;
1864 }
1865
1866 /* DMA Interrupt (boundary crossing) */
1867
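		/*
		 * SDMA pauses at every boundary; writing SDHC_DMA_ADDR
		 * (the current position, or the next segment's start
		 * address) resumes the transfer.
		 */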
		segaddr = dm_segs[seg].ds_addr;
		seglen = dm_segs[seg].ds_len;
		posaddr = HREAD4(hp, SDHC_DMA_ADDR);

		if ((seg == (cmd->c_dmamap->dm_nsegs - 1)) &&
		    (posaddr == (segaddr + seglen))) {
			continue;
		}
		if ((posaddr >= segaddr) && (posaddr < (segaddr + seglen)))
			HWRITE4(hp, SDHC_DMA_ADDR, posaddr);
		else if ((posaddr >= segaddr) &&
		    (posaddr == (segaddr + seglen)) &&
		    (seg + 1) < cmd->c_dmamap->dm_nsegs)
			HWRITE4(hp, SDHC_DMA_ADDR, dm_segs[++seg].ds_addr);
		KASSERT(seg < cmd->c_dmamap->dm_nsegs);
	}

	if (ISSET(hp->flags, SHF_USE_ADMA2_MASK)) {
		bus_dmamap_sync(hp->sc->sc_dmat, hp->adma_map, 0,
		    PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
	}

	return error;
}

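/*
 * PIO data phase: wait until the controller reports buffer-read or
 * buffer-write ready, move one block at a time through the data port,
 * then wait for SDHC_TRANSFER_COMPLETE.
 */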
static int
sdhc_transfer_data_pio(struct sdhc_host *hp, struct sdmmc_command *cmd)
{
	uint8_t *data = cmd->c_data;
	void (*pio_func)(struct sdhc_host *, uint8_t *, u_int);
	u_int len, datalen;
	u_int imask;
	u_int pmask;
	int error = 0;

	KASSERT(mutex_owned(&hp->intr_lock));

	if (ISSET(cmd->c_flags, SCF_CMD_READ)) {
		imask = SDHC_BUFFER_READ_READY;
		pmask = SDHC_BUFFER_READ_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_read_data_pio;
		} else {
			pio_func = sdhc_read_data_pio;
		}
	} else {
		imask = SDHC_BUFFER_WRITE_READY;
		pmask = SDHC_BUFFER_WRITE_ENABLE;
		if (ISSET(hp->sc->sc_flags,
		    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
			pio_func = esdhc_write_data_pio;
		} else {
			pio_func = sdhc_write_data_pio;
		}
	}
	datalen = cmd->c_datalen;

	KASSERT(mutex_owned(&hp->intr_lock));
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & imask);
	KASSERT(HREAD2(hp, SDHC_NINTR_STATUS_EN) & SDHC_TRANSFER_COMPLETE);
	KASSERT(HREAD2(hp, SDHC_NINTR_SIGNAL_EN) & SDHC_TRANSFER_COMPLETE);

	while (datalen > 0) {
		if (!ISSET(HREAD4(hp, SDHC_PRESENT_STATE), pmask)) {
			if (ISSET(hp->sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
				HSET4(hp, SDHC_NINTR_SIGNAL_EN, imask);
			} else {
				HSET2(hp, SDHC_NINTR_SIGNAL_EN, imask);
			}
			if (!sdhc_wait_intr(hp, imask, SDHC_BUFFER_TIMEOUT, false)) {
				DPRINTF(1,("%s: timeout\n", __func__));
				error = ETIMEDOUT;
				break;
			}

			error = sdhc_wait_state(hp, pmask, pmask);
			if (error)
				break;
		}

		len = MIN(datalen, cmd->c_blklen);
		(*pio_func)(hp, data, len);
		DPRINTF(2,("%s: pio data transfer %u @ %p\n",
		    HDEVNAME(hp), len, data));

		data += len;
		datalen -= len;
	}

	if (error == 0 && !sdhc_wait_intr(hp, SDHC_TRANSFER_COMPLETE,
	    SDHC_TRANSFER_TIMEOUT, false)) {
		DPRINTF(1,("%s: timeout for transfer\n", __func__));
		error = ETIMEDOUT;
	}

	return error;
}

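/*
 * Standard-host PIO helpers: copy a block through the buffer data port
 * using the widest access the buffer's alignment allows (32-bit,
 * 16-bit, or byte at a time).
 */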
static void
sdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{

	if (((__uintptr_t)data & 3) == 0) {
		while (datalen > 3) {
			*(uint32_t *)data = le32toh(HREAD4(hp, SDHC_DATA));
			data += 4;
			datalen -= 4;
		}
		if (datalen > 1) {
			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	} else if (((__uintptr_t)data & 1) == 0) {
		while (datalen > 1) {
			*(uint16_t *)data = le16toh(HREAD2(hp, SDHC_DATA));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	} else {
		while (datalen > 0) {
			*data = HREAD1(hp, SDHC_DATA);
			data += 1;
			datalen -= 1;
		}
	}
}

static void
sdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{

	if (((__uintptr_t)data & 3) == 0) {
		while (datalen > 3) {
			HWRITE4(hp, SDHC_DATA, htole32(*(uint32_t *)data));
			data += 4;
			datalen -= 4;
		}
		if (datalen > 1) {
			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	} else if (((__uintptr_t)data & 1) == 0) {
		while (datalen > 1) {
			HWRITE2(hp, SDHC_DATA, htole16(*(uint16_t *)data));
			data += 2;
			datalen -= 2;
		}
		if (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	} else {
		while (datalen > 0) {
			HWRITE1(hp, SDHC_DATA, *data);
			data += 1;
			datalen -= 1;
		}
	}
}

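/*
 * Freescale eSDHC/uSDHC PIO read: the data port is 32 bits wide and fed
 * from a watermark-controlled FIFO, so after draining "watermark" words
 * we pause briefly to let the FIFO refill.  Note that the word loop
 * stores through a uint32_t pointer and therefore assumes the
 * destination buffer is 32-bit aligned.
 */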
static void
esdhc_read_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_READ_SHIFT) &
	    SDHC_WATERMARK_READ_MASK;
	size_t count = 0;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			/*
			 * If we've drained "watermark" words, we need to wait
			 * a little bit so the read FIFO can refill.
			 */
			sdmmc_delay(10);
			count = watermark;
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		*(uint32_t *)data = v;
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = HREAD4(hp, SDHC_DATA);
		v = le32toh(v);
		do {
			*data++ = v;
			v >>= 8;
		} while (--datalen > 0);
	}
}

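/*
 * Freescale eSDHC/uSDHC PIO write, the mirror image of the read path
 * above.  Note that a final partial word is still read from the source
 * buffer and issued as a full 32-bit store, which assumes the buffer is
 * 32-bit aligned and at least one full word long.
 */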
static void
esdhc_write_data_pio(struct sdhc_host *hp, uint8_t *data, u_int datalen)
{
	uint16_t status = HREAD2(hp, SDHC_NINTR_STATUS);
	uint32_t v;

	const size_t watermark =
	    (HREAD4(hp, SDHC_WATERMARK_LEVEL) >> SDHC_WATERMARK_WRITE_SHIFT) &
	    SDHC_WATERMARK_WRITE_MASK;
	size_t count = watermark;

	while (datalen > 3 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
			count = watermark;
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
		data += 4;
		datalen -= 4;
		status = HREAD2(hp, SDHC_NINTR_STATUS);
		count--;
	}
	if (datalen > 0 && !ISSET(status, SDHC_TRANSFER_COMPLETE)) {
		if (count == 0) {
			sdmmc_delay(10);
		}
		v = *(uint32_t *)data;
		v = htole32(v);
		HWRITE4(hp, SDHC_DATA, v);
	}
}

/* Prepare for another command. */
static int
sdhc_soft_reset(struct sdhc_host *hp, int mask)
{
	int timo;

	KASSERT(mutex_owned(&hp->intr_lock));

	DPRINTF(1,("%s: software reset reg=%08x\n", HDEVNAME(hp), mask));

	/* Request the reset. */
	HWRITE1(hp, SDHC_SOFTWARE_RESET, mask);

	/*
	 * If necessary, wait for the controller to set the bits to
	 * acknowledge the reset.
	 */
	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_WAIT_RESET) &&
	    ISSET(mask, (SDHC_RESET_DAT | SDHC_RESET_CMD))) {
		for (timo = 10000; timo > 0; timo--) {
			if (ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
				break;
			/* Short delay because I worry we may miss it... */
			sdmmc_delay(1);
		}
		if (timo == 0) {
			DPRINTF(1,("%s: timeout waiting for reset ack\n",
			    __func__));
			return ETIMEDOUT;
		}
	}

	/*
	 * Wait for the controller to clear the bits to indicate that
	 * the reset has completed (up to about 100 ms).
	 */
	for (timo = 10; timo > 0; timo--) {
		if (!ISSET(HREAD1(hp, SDHC_SOFTWARE_RESET), mask))
			break;
		sdmmc_delay(10000);
	}
	if (timo == 0) {
		DPRINTF(1,("%s: timeout reg=%08x\n", HDEVNAME(hp),
		    HREAD1(hp, SDHC_SOFTWARE_RESET)));
		return ETIMEDOUT;
	}

	if (ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED)) {
		HSET4(hp, SDHC_DMA_CTL, SDHC_DMA_SNOOP);
	}

	return 0;
}

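/*
 * Sleep on the host's interrupt condition variable until one of the
 * interrupt bits in "mask" (or an error interrupt) has been posted by
 * sdhc_intr(), or until "timo" ticks have elapsed.  Returns the status
 * bits that were seen, or 0 on timeout or error; in the latter case the
 * CMD and DAT lines are soft-reset unless the ENHANCED quirk is set.
 */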
static int
sdhc_wait_intr(struct sdhc_host *hp, int mask, int timo, bool probing)
{
	int status, error, nointr;

	KASSERT(mutex_owned(&hp->intr_lock));

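	/* Always wake up on error interrupts as well. */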
	mask |= SDHC_ERROR_INTERRUPT;

	nointr = 0;
	status = hp->intr_status & mask;
	while (status == 0) {
		if (cv_timedwait(&hp->intr_cv, &hp->intr_lock, timo)
		    == EWOULDBLOCK) {
			nointr = 1;
			break;
		}
		status = hp->intr_status & mask;
	}
	error = hp->intr_error_status;

	DPRINTF(2,("%s: intr status %#x error %#x\n", HDEVNAME(hp), status,
	    error));

	hp->intr_status &= ~status;
	hp->intr_error_status &= ~error;

	if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
		if (ISSET(error, SDHC_DMA_ERROR))
			device_printf(hp->sc->sc_dev, "dma error\n");
		if (ISSET(error, SDHC_ADMA_ERROR))
			device_printf(hp->sc->sc_dev, "adma error\n");
		if (ISSET(error, SDHC_AUTO_CMD12_ERROR))
			device_printf(hp->sc->sc_dev, "auto_cmd12 error\n");
		if (ISSET(error, SDHC_CURRENT_LIMIT_ERROR))
			device_printf(hp->sc->sc_dev, "current limit error\n");
		if (ISSET(error, SDHC_DATA_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev, "data end bit error\n");
		if (ISSET(error, SDHC_DATA_CRC_ERROR))
			device_printf(hp->sc->sc_dev, "data crc error\n");
		if (ISSET(error, SDHC_DATA_TIMEOUT_ERROR))
			device_printf(hp->sc->sc_dev, "data timeout error\n");
		if (ISSET(error, SDHC_CMD_INDEX_ERROR))
			device_printf(hp->sc->sc_dev, "cmd index error\n");
		if (ISSET(error, SDHC_CMD_END_BIT_ERROR))
			device_printf(hp->sc->sc_dev, "cmd end bit error\n");
		if (ISSET(error, SDHC_CMD_CRC_ERROR))
			device_printf(hp->sc->sc_dev, "cmd crc error\n");
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR)) {
			if (!probing)
				device_printf(hp->sc->sc_dev,
				    "cmd timeout error\n");
#ifdef SDHC_DEBUG
			else if (sdhcdebug > 0)
				device_printf(hp->sc->sc_dev,
				    "cmd timeout (expected)\n");
#endif
		}
		if ((error & ~SDHC_EINTR_STATUS_MASK) != 0)
			device_printf(hp->sc->sc_dev, "vendor error %#x\n",
			    (error & ~SDHC_EINTR_STATUS_MASK));
		if (error == 0)
			device_printf(hp->sc->sc_dev, "no error\n");

		/* Command timeout has higher priority than command complete. */
		if (ISSET(error, SDHC_CMD_TIMEOUT_ERROR))
			CLR(status, SDHC_COMMAND_COMPLETE);

		/* Transfer complete has higher priority than data timeout. */
		if (ISSET(status, SDHC_TRANSFER_COMPLETE))
			CLR(error, SDHC_DATA_TIMEOUT_ERROR);
	}

	if (nointr ||
	    (ISSET(status, SDHC_ERROR_INTERRUPT) && error)) {
		if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
			(void)sdhc_soft_reset(hp, SDHC_RESET_CMD|SDHC_RESET_DAT);
		hp->intr_error_status = 0;
		status = 0;
	}

	return status;
}

/*
 * Established by attachment driver at interrupt priority IPL_SDMMC.
 */
int
sdhc_intr(void *arg)
{
	struct sdhc_softc *sc = (struct sdhc_softc *)arg;
	struct sdhc_host *hp;
	int done = 0;
	uint16_t status;
	uint16_t error;

	/* We got an interrupt, but we don't know from which slot. */
	for (size_t host = 0; host < sc->sc_nhosts; host++) {
		hp = sc->sc_host[host];
		if (hp == NULL)
			continue;

		mutex_enter(&hp->intr_lock);

		if (ISSET(sc->sc_flags, SDHC_FLAG_32BIT_ACCESS)) {
			/* Find out which interrupts are pending. */
			uint32_t xstatus = HREAD4(hp, SDHC_NINTR_STATUS);
			status = xstatus;
			error = xstatus >> 16;
			if (ISSET(sc->sc_flags, SDHC_FLAG_USDHC) &&
			    (xstatus & SDHC_TRANSFER_COMPLETE) &&
			    !(xstatus & SDHC_DMA_INTERRUPT)) {
				/* read again due to uSDHC errata */
				status = xstatus = HREAD4(hp,
				    SDHC_NINTR_STATUS);
				error = xstatus >> 16;
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				if ((error & SDHC_NINTR_STATUS_MASK) != 0)
					SET(status, SDHC_ERROR_INTERRUPT);
			}
			if (error)
				xstatus |= SDHC_ERROR_INTERRUPT;
			else if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE4(hp, SDHC_NINTR_STATUS, xstatus);
		} else {
			/* Find out which interrupts are pending. */
			error = 0;
			status = HREAD2(hp, SDHC_NINTR_STATUS);
			if (!ISSET(status, SDHC_NINTR_STATUS_MASK))
				goto next_port; /* no interrupt for us */
			/* Acknowledge the interrupts we are about to handle. */
			HWRITE2(hp, SDHC_NINTR_STATUS, status);
			if (ISSET(status, SDHC_ERROR_INTERRUPT)) {
				/* Acknowledge error interrupts. */
				error = HREAD2(hp, SDHC_EINTR_STATUS);
				HWRITE2(hp, SDHC_EINTR_STATUS, error);
			}
		}

		DPRINTF(2,("%s: interrupt status=%x error=%x\n", HDEVNAME(hp),
		    status, error));

		/* Claim this interrupt. */
		done = 1;

		if (ISSET(status, SDHC_ERROR_INTERRUPT) &&
		    ISSET(error, SDHC_ADMA_ERROR)) {
			uint8_t adma_err = HREAD1(hp, SDHC_ADMA_ERROR_STATUS);
			printf("%s: ADMA error, status %02x\n", HDEVNAME(hp),
			    adma_err);
		}

		/*
		 * Wake up the sdmmc event thread to scan for cards.
		 */
		if (ISSET(status, SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION)) {
			if (hp->sdmmc != NULL) {
				sdmmc_needs_discover(hp->sdmmc);
			}
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				HCLR4(hp, SDHC_NINTR_STATUS_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
				    status & (SDHC_CARD_REMOVAL|SDHC_CARD_INSERTION));
			}
		}

		/*
		 * Schedule re-tuning process (UHS).
		 */
		if (ISSET(status, SDHC_RETUNING_EVENT)) {
			atomic_swap_uint(&hp->tuning_timer_pending, 1);
		}

		/*
		 * Wake up the blocking process to service command
		 * related interrupt(s).
		 */
		if (ISSET(status, SDHC_COMMAND_COMPLETE|SDHC_ERROR_INTERRUPT|
		    SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY|
		    SDHC_TRANSFER_COMPLETE|SDHC_DMA_INTERRUPT)) {
			hp->intr_error_status |= error;
			hp->intr_status |= status;
			if (ISSET(sc->sc_flags,
			    SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)) {
				HCLR4(hp, SDHC_NINTR_SIGNAL_EN,
				    status & (SDHC_BUFFER_READ_READY|SDHC_BUFFER_WRITE_READY));
			}
			cv_broadcast(&hp->intr_cv);
		}

		/*
		 * Service SD card interrupts.
		 */
		if (!ISSET(sc->sc_flags, SDHC_FLAG_ENHANCED | SDHC_FLAG_USDHC)
		    && ISSET(status, SDHC_CARD_INTERRUPT)) {
			DPRINTF(0,("%s: card interrupt\n", HDEVNAME(hp)));
			HCLR2(hp, SDHC_NINTR_STATUS_EN, SDHC_CARD_INTERRUPT);
			sdmmc_card_intr(hp->sdmmc);
		}
next_port:
		mutex_exit(&hp->intr_lock);
	}

	return done;
}

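/* Return the lock that protects this host's interrupt state. */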
kmutex_t *
sdhc_host_lock(struct sdhc_host *hp)
{
	return &hp->intr_lock;
}

#ifdef SDHC_DEBUG
void
sdhc_dump_regs(struct sdhc_host *hp)
{

	printf("0x%02x PRESENT_STATE: %x\n", SDHC_PRESENT_STATE,
	    HREAD4(hp, SDHC_PRESENT_STATE));
	if (!ISSET(hp->sc->sc_flags, SDHC_FLAG_ENHANCED))
		printf("0x%02x POWER_CTL: %x\n", SDHC_POWER_CTL,
		    HREAD1(hp, SDHC_POWER_CTL));
	printf("0x%02x NINTR_STATUS: %x\n", SDHC_NINTR_STATUS,
	    HREAD2(hp, SDHC_NINTR_STATUS));
	printf("0x%02x EINTR_STATUS: %x\n", SDHC_EINTR_STATUS,
	    HREAD2(hp, SDHC_EINTR_STATUS));
	printf("0x%02x NINTR_STATUS_EN: %x\n", SDHC_NINTR_STATUS_EN,
	    HREAD2(hp, SDHC_NINTR_STATUS_EN));
	printf("0x%02x EINTR_STATUS_EN: %x\n", SDHC_EINTR_STATUS_EN,
	    HREAD2(hp, SDHC_EINTR_STATUS_EN));
	printf("0x%02x NINTR_SIGNAL_EN: %x\n", SDHC_NINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_NINTR_SIGNAL_EN));
	printf("0x%02x EINTR_SIGNAL_EN: %x\n", SDHC_EINTR_SIGNAL_EN,
	    HREAD2(hp, SDHC_EINTR_SIGNAL_EN));
	printf("0x%02x CAPABILITIES: %x\n", SDHC_CAPABILITIES,
	    HREAD4(hp, SDHC_CAPABILITIES));
	printf("0x%02x MAX_CAPABILITIES: %x\n", SDHC_MAX_CAPABILITIES,
	    HREAD4(hp, SDHC_MAX_CAPABILITIES));
}
#endif
