1 | /* $NetBSD: acpi_cpu_cstate.c,v 1.59 2012/02/25 17:22:52 jruoho Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi> |
5 | * All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 | * SUCH DAMAGE. |
28 | */ |
29 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.59 2012/02/25 17:22:52 jruoho Exp $");
31 | |
32 | #include <sys/param.h> |
33 | #include <sys/cpu.h> |
34 | #include <sys/device.h> |
35 | #include <sys/kernel.h> |
36 | #include <sys/mutex.h> |
37 | #include <sys/timetc.h> |
38 | |
39 | #include <dev/acpi/acpireg.h> |
40 | #include <dev/acpi/acpivar.h> |
41 | #include <dev/acpi/acpi_cpu.h> |
42 | #include <dev/acpi/acpi_timer.h> |
43 | |
44 | #include <machine/acpi_machdep.h> |
45 | |
46 | #define _COMPONENT ACPI_BUS_COMPONENT |
ACPI_MODULE_NAME ("acpi_cpu_cstate")
48 | |
49 | static ACPI_STATUS acpicpu_cstate_cst(struct acpicpu_softc *); |
50 | static ACPI_STATUS acpicpu_cstate_cst_add(struct acpicpu_softc *, |
51 | ACPI_OBJECT *); |
52 | static void acpicpu_cstate_cst_bios(void); |
53 | static void acpicpu_cstate_memset(struct acpicpu_softc *); |
54 | static ACPI_STATUS acpicpu_cstate_dep(struct acpicpu_softc *); |
55 | static void acpicpu_cstate_fadt(struct acpicpu_softc *); |
56 | static void acpicpu_cstate_quirks(struct acpicpu_softc *); |
57 | static int acpicpu_cstate_latency(struct acpicpu_softc *); |
58 | static bool acpicpu_cstate_bm_check(void); |
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);
60 | |
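/*
 * The softc of each processor is kept in this array, indexed
 * by the ACPI id of the CPU; see acpicpu_cstate_idle() below.
 */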
61 | extern struct acpicpu_softc **acpicpu_sc; |
62 | |
63 | /* |
64 | * XXX: The local APIC timer (as well as TSC) is typically stopped in C3. |
 *	For now, we have no choice but to disable C3. There also appear
 *	to be timer-related interrupt issues in C2. The only entirely
 *	safe option at the moment is to use C1.
68 | */ |
69 | #ifdef ACPICPU_ENABLE_C3 |
70 | static int cs_state_max = ACPI_STATE_C3; |
71 | #else |
72 | static int cs_state_max = ACPI_STATE_C1; |
73 | #endif |
74 | |
75 | void |
76 | acpicpu_cstate_attach(device_t self) |
77 | { |
78 | struct acpicpu_softc *sc = device_private(self); |
79 | ACPI_STATUS rv; |
80 | |
81 | /* |
82 | * Either use the preferred _CST or resort to FADT. |
83 | */ |
84 | rv = acpicpu_cstate_cst(sc); |
85 | |
86 | switch (rv) { |
87 | |
88 | case AE_OK: |
89 | acpicpu_cstate_cst_bios(); |
90 | break; |
91 | |
92 | default: |
93 | sc->sc_flags |= ACPICPU_FLAG_C_FADT; |
94 | acpicpu_cstate_fadt(sc); |
95 | break; |
96 | } |
97 | |
98 | /* |
99 | * Query the optional _CSD. |
100 | */ |
101 | rv = acpicpu_cstate_dep(sc); |
102 | |
103 | if (ACPI_SUCCESS(rv)) |
104 | sc->sc_flags |= ACPICPU_FLAG_C_DEP; |
105 | |
106 | sc->sc_flags |= ACPICPU_FLAG_C; |
107 | |
108 | acpicpu_cstate_quirks(sc); |
109 | } |
110 | |
111 | void |
112 | acpicpu_cstate_detach(device_t self) |
113 | { |
114 | struct acpicpu_softc *sc = device_private(self); |
115 | |
116 | if ((sc->sc_flags & ACPICPU_FLAG_C) == 0) |
117 | return; |
118 | |
119 | (void)acpicpu_md_cstate_stop(); |
120 | |
121 | sc->sc_flags &= ~ACPICPU_FLAG_C; |
122 | } |
123 | |
124 | void |
125 | acpicpu_cstate_start(device_t self) |
126 | { |
127 | struct acpicpu_softc *sc = device_private(self); |
128 | |
129 | (void)acpicpu_md_cstate_start(sc); |
130 | } |
131 | |
132 | void |
133 | acpicpu_cstate_suspend(void *aux) |
134 | { |
135 | /* Nothing. */ |
136 | } |
137 | |
138 | void |
139 | acpicpu_cstate_resume(void *aux) |
140 | { |
141 | acpicpu_cstate_callback(aux); |
142 | } |
143 | |
144 | void |
145 | acpicpu_cstate_callback(void *aux) |
146 | { |
147 | struct acpicpu_softc *sc; |
148 | device_t self = aux; |
149 | |
150 | sc = device_private(self); |
151 | |
152 | if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) |
153 | return; |
154 | |
155 | mutex_enter(&sc->sc_mtx); |
156 | (void)acpicpu_cstate_cst(sc); |
157 | mutex_exit(&sc->sc_mtx); |
158 | } |
159 | |
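/*
 * Evaluate _CST. Per ACPI 4.0 (section 8.4.2.1), the
 * returned package should be roughly of the form:
 *
 *	Package {
 *		Count,				// Integer
 *		Package {			// one per C-state
 *			Register,		// Buffer (Generic Register)
 *			Type,			// Integer (1 = C1, ...)
 *			Latency,		// Integer (us)
 *			Power			// Integer (mW)
 *		},
 *		...
 *	}
 */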
160 | static ACPI_STATUS |
161 | acpicpu_cstate_cst(struct acpicpu_softc *sc) |
162 | { |
163 | struct acpicpu_cstate *cs = sc->sc_cstate; |
164 | ACPI_OBJECT *elm, *obj; |
165 | ACPI_BUFFER buf; |
166 | ACPI_STATUS rv; |
167 | uint32_t i, n; |
168 | uint8_t count; |
169 | |
	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);
171 | |
172 | if (ACPI_FAILURE(rv)) |
173 | return rv; |
174 | |
175 | obj = buf.Pointer; |
176 | |
177 | if (obj->Type != ACPI_TYPE_PACKAGE) { |
178 | rv = AE_TYPE; |
179 | goto out; |
180 | } |
181 | |
182 | if (obj->Package.Count < 2) { |
183 | rv = AE_LIMIT; |
184 | goto out; |
185 | } |
186 | |
187 | elm = obj->Package.Elements; |
188 | |
189 | if (elm[0].Type != ACPI_TYPE_INTEGER) { |
190 | rv = AE_TYPE; |
191 | goto out; |
192 | } |
193 | |
194 | n = elm[0].Integer.Value; |
195 | |
196 | if (n != obj->Package.Count - 1) { |
197 | rv = AE_BAD_VALUE; |
198 | goto out; |
199 | } |
200 | |
201 | if (n > ACPI_C_STATES_MAX) { |
202 | rv = AE_LIMIT; |
203 | goto out; |
204 | } |
205 | |
206 | acpicpu_cstate_memset(sc); |
207 | |
208 | /* |
209 | * All x86 processors should support C1 (a.k.a. HALT). |
210 | */ |
211 | cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT; |
212 | |
213 | CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1); |
214 | CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3); |
215 | |
216 | for (count = 0, i = 1; i <= n; i++) { |
217 | |
218 | elm = &obj->Package.Elements[i]; |
219 | rv = acpicpu_cstate_cst_add(sc, elm); |
220 | |
221 | if (ACPI_SUCCESS(rv)) |
222 | count++; |
223 | } |
224 | |
225 | rv = (count != 0) ? AE_OK : AE_NOT_EXIST; |
226 | |
227 | out: |
228 | if (buf.Pointer != NULL) |
229 | ACPI_FREE(buf.Pointer); |
230 | |
231 | return rv; |
232 | } |
233 | |
234 | static ACPI_STATUS |
235 | acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm) |
236 | { |
237 | struct acpicpu_cstate *cs = sc->sc_cstate; |
238 | struct acpicpu_cstate state; |
239 | struct acpicpu_reg *reg; |
240 | ACPI_STATUS rv = AE_OK; |
241 | ACPI_OBJECT *obj; |
242 | uint32_t type; |
243 | |
244 | (void)memset(&state, 0, sizeof(*cs)); |
245 | |
246 | if (elm->Type != ACPI_TYPE_PACKAGE) { |
247 | rv = AE_TYPE; |
248 | goto out; |
249 | } |
250 | |
251 | if (elm->Package.Count != 4) { |
252 | rv = AE_LIMIT; |
253 | goto out; |
254 | } |
255 | |
256 | /* |
257 | * Type. |
258 | */ |
259 | obj = &elm->Package.Elements[1]; |
260 | |
261 | if (obj->Type != ACPI_TYPE_INTEGER) { |
262 | rv = AE_TYPE; |
263 | goto out; |
264 | } |
265 | |
266 | type = obj->Integer.Value; |
267 | |
268 | if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) { |
269 | rv = AE_TYPE; |
270 | goto out; |
271 | } |
272 | |
273 | /* |
274 | * Latency. |
275 | */ |
276 | obj = &elm->Package.Elements[2]; |
277 | |
278 | if (obj->Type != ACPI_TYPE_INTEGER) { |
279 | rv = AE_TYPE; |
280 | goto out; |
281 | } |
282 | |
283 | state.cs_latency = obj->Integer.Value; |
284 | |
285 | /* |
286 | * Power. |
287 | */ |
288 | obj = &elm->Package.Elements[3]; |
289 | |
290 | if (obj->Type != ACPI_TYPE_INTEGER) { |
291 | rv = AE_TYPE; |
292 | goto out; |
293 | } |
294 | |
295 | state.cs_power = obj->Integer.Value; |
296 | |
297 | /* |
298 | * Register. |
299 | */ |
300 | obj = &elm->Package.Elements[0]; |
301 | |
302 | if (obj->Type != ACPI_TYPE_BUFFER) { |
303 | rv = AE_TYPE; |
304 | goto out; |
305 | } |
306 | |
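	/*
	 * The buffer should contain a Generic Register resource
	 * descriptor: a three byte header followed by the 12-byte
	 * Generic Address Structure (GAS), i.e. 15 bytes in total
	 * (cf. ACPI 4.0, section 6.4.3.7).
	 */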
307 | CTASSERT(sizeof(struct acpicpu_reg) == 15); |
308 | |
309 | if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) { |
310 | rv = AE_LIMIT; |
311 | goto out; |
312 | } |
313 | |
314 | reg = (struct acpicpu_reg *)obj->Buffer.Pointer; |
315 | |
316 | switch (reg->reg_spaceid) { |
317 | |
318 | case ACPI_ADR_SPACE_SYSTEM_IO: |
319 | state.cs_method = ACPICPU_C_STATE_SYSIO; |
320 | |
321 | if (reg->reg_addr == 0) { |
322 | rv = AE_AML_ILLEGAL_ADDRESS; |
323 | goto out; |
324 | } |
325 | |
326 | if (reg->reg_bitwidth != 8) { |
327 | rv = AE_AML_BAD_RESOURCE_LENGTH; |
328 | goto out; |
329 | } |
330 | |
331 | state.cs_addr = reg->reg_addr; |
332 | break; |
333 | |
334 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
335 | state.cs_method = ACPICPU_C_STATE_FFH; |
336 | |
337 | switch (type) { |
338 | |
339 | case ACPI_STATE_C1: |
340 | |
341 | /* |
342 | * If ACPI wants native access (FFH), but the |
343 | * MD code does not support MONITOR/MWAIT, use |
344 | * HLT for C1 and error out for higher C-states. |
345 | */ |
346 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) |
347 | state.cs_method = ACPICPU_C_STATE_HALT; |
348 | |
349 | break; |
350 | |
		case ACPI_STATE_C3:
			state.cs_flags = ACPICPU_FLAG_C_BM_STS;
			/* FALLTHROUGH */

		default:
355 | |
356 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) { |
357 | rv = AE_SUPPORT; |
358 | goto out; |
359 | } |
360 | } |
361 | |
362 | if (sc->sc_cap != 0) { |
363 | |
364 | /* |
365 | * The _CST FFH GAS encoding may contain |
366 | * additional hints on Intel processors. |
367 | * Use these to determine whether we can |
368 | * avoid the bus master activity check. |
369 | */ |
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) != 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
372 | } |
373 | |
374 | break; |
375 | |
376 | default: |
377 | rv = AE_AML_INVALID_SPACE_ID; |
378 | goto out; |
379 | } |
380 | |
381 | cs[type].cs_addr = state.cs_addr; |
382 | cs[type].cs_power = state.cs_power; |
383 | cs[type].cs_flags = state.cs_flags; |
384 | cs[type].cs_method = state.cs_method; |
385 | cs[type].cs_latency = state.cs_latency; |
386 | |
387 | out: |
388 | if (ACPI_FAILURE(rv)) |
389 | aprint_error_dev(sc->sc_dev, "failed to add " |
390 | "C-state: %s\n" , AcpiFormatException(rv)); |
391 | |
392 | return rv; |
393 | } |
394 | |
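/*
 * Signal the firmware that the OS will use _CST by writing
 * the value of the FADT "CST_CNT" field to the SMI command
 * port (cf. ACPI 4.0, section 5.2.9). The firmware may then
 * update the _CST objects in response.
 */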
395 | static void |
396 | acpicpu_cstate_cst_bios(void) |
397 | { |
398 | const uint8_t val = AcpiGbl_FADT.CstControl; |
399 | const uint32_t addr = AcpiGbl_FADT.SmiCommand; |
400 | |
401 | if (addr == 0 || val == 0) |
402 | return; |
403 | |
404 | (void)AcpiOsWritePort(addr, val, 8); |
405 | } |
406 | |
407 | static void |
408 | acpicpu_cstate_memset(struct acpicpu_softc *sc) |
409 | { |
410 | uint8_t i = 0; |
411 | |
412 | while (i < __arraycount(sc->sc_cstate)) { |
413 | |
414 | sc->sc_cstate[i].cs_addr = 0; |
415 | sc->sc_cstate[i].cs_power = 0; |
416 | sc->sc_cstate[i].cs_flags = 0; |
417 | sc->sc_cstate[i].cs_method = 0; |
418 | sc->sc_cstate[i].cs_latency = 0; |
419 | |
420 | i++; |
421 | } |
422 | } |
423 | |
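/*
 * Evaluate _CSD. Per ACPI 4.0 (section 8.4.2.2), the object
 * should contain a single sub-package of six integers:
 *
 *	{ NumEntries, Revision, Domain, CoordType, NumProc, Index }
 */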
424 | static ACPI_STATUS |
425 | acpicpu_cstate_dep(struct acpicpu_softc *sc) |
426 | { |
427 | ACPI_OBJECT *elm, *obj; |
428 | ACPI_BUFFER buf; |
429 | ACPI_STATUS rv; |
430 | uint32_t val; |
431 | uint8_t i, n; |
432 | |
	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);
434 | |
435 | if (ACPI_FAILURE(rv)) |
436 | goto out; |
437 | |
438 | obj = buf.Pointer; |
439 | |
440 | if (obj->Type != ACPI_TYPE_PACKAGE) { |
441 | rv = AE_TYPE; |
442 | goto out; |
443 | } |
444 | |
445 | if (obj->Package.Count != 1) { |
446 | rv = AE_LIMIT; |
447 | goto out; |
448 | } |
449 | |
450 | elm = &obj->Package.Elements[0]; |
451 | |
	if (elm->Type != ACPI_TYPE_PACKAGE) {
453 | rv = AE_TYPE; |
454 | goto out; |
455 | } |
456 | |
457 | n = elm->Package.Count; |
458 | |
459 | if (n != 6) { |
460 | rv = AE_LIMIT; |
461 | goto out; |
462 | } |
463 | |
464 | elm = elm->Package.Elements; |
465 | |
466 | for (i = 0; i < n; i++) { |
467 | |
468 | if (elm[i].Type != ACPI_TYPE_INTEGER) { |
469 | rv = AE_TYPE; |
470 | goto out; |
471 | } |
472 | |
473 | if (elm[i].Integer.Value > UINT32_MAX) { |
474 | rv = AE_AML_NUMERIC_OVERFLOW; |
475 | goto out; |
476 | } |
477 | } |
478 | |
479 | val = elm[1].Integer.Value; |
480 | |
481 | if (val != 0) |
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");
483 | |
484 | val = elm[3].Integer.Value; |
485 | |
486 | if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) { |
487 | rv = AE_AML_BAD_RESOURCE_VALUE; |
488 | goto out; |
489 | } |
490 | |
491 | val = elm[4].Integer.Value; |
492 | |
493 | if (val > sc->sc_ncpus) { |
494 | rv = AE_BAD_VALUE; |
495 | goto out; |
496 | } |
497 | |
498 | sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value; |
499 | sc->sc_cstate_dep.dep_type = elm[3].Integer.Value; |
500 | sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value; |
501 | sc->sc_cstate_dep.dep_index = elm[5].Integer.Value; |
502 | |
503 | out: |
504 | if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) |
505 | aprint_debug_dev(sc->sc_dev, "failed to evaluate " |
506 | "_CSD: %s\n" , AcpiFormatException(rv)); |
507 | |
508 | if (buf.Pointer != NULL) |
509 | ACPI_FREE(buf.Pointer); |
510 | |
511 | return rv; |
512 | } |
513 | |
514 | static void |
515 | acpicpu_cstate_fadt(struct acpicpu_softc *sc) |
516 | { |
517 | struct acpicpu_cstate *cs = sc->sc_cstate; |
518 | |
519 | acpicpu_cstate_memset(sc); |
520 | |
521 | /* |
522 | * All x86 processors should support C1 (a.k.a. HALT). |
523 | */ |
524 | cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT; |
525 | |
526 | if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0) |
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");
528 | |
529 | if (sc->sc_object.ao_pblkaddr == 0) |
530 | return; |
531 | |
532 | if (sc->sc_ncpus > 1) { |
533 | |
534 | if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0) |
535 | return; |
536 | } |
537 | |
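	/*
	 * The C2 and C3 register addresses are derived from the
	 * P_BLK, in which the P_LVL2 and P_LVL3 registers follow
	 * the four byte processor control register:
	 *
	 *	offset 0: P_CNT  (4 bytes)
	 *	offset 4: P_LVL2 (1 byte)
	 *	offset 5: P_LVL3 (1 byte)
	 */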
538 | cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO; |
539 | cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO; |
540 | |
541 | cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency; |
542 | cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency; |
543 | |
544 | cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4; |
545 | cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5; |
546 | |
547 | /* |
548 | * The P_BLK length should always be 6. If it |
549 | * is not, reduce functionality accordingly. |
550 | */ |
551 | if (sc->sc_object.ao_pblklen < 5) |
552 | cs[ACPI_STATE_C2].cs_method = 0; |
553 | |
554 | if (sc->sc_object.ao_pblklen < 6) |
555 | cs[ACPI_STATE_C3].cs_method = 0; |
556 | |
557 | /* |
	 * Sanity check the latency levels in the FADT. Values above
	 * the thresholds may be used to indicate that C2 and C3 are
	 * not supported -- AMD family 11h is an example;
561 | * |
562 | * Advanced Micro Devices: BIOS and Kernel Developer's |
563 | * Guide (BKDG) for AMD Family 11h Processors. Section |
564 | * 2.4.3, Revision 3.00, July, 2008. |
565 | */ |
566 | CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100); |
567 | CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000); |
568 | |
569 | if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX) |
570 | cs[ACPI_STATE_C2].cs_method = 0; |
571 | |
572 | if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX) |
573 | cs[ACPI_STATE_C3].cs_method = 0; |
574 | } |
575 | |
576 | static void |
577 | acpicpu_cstate_quirks(struct acpicpu_softc *sc) |
578 | { |
579 | const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock; |
580 | const uint32_t len = AcpiGbl_FADT.Pm2ControlLength; |
581 | |
582 | /* |
583 | * Disable C3 for PIIX4. |
584 | */ |
585 | if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) { |
586 | sc->sc_cstate[ACPI_STATE_C3].cs_method = 0; |
587 | return; |
588 | } |
589 | |
590 | /* |
591 | * Check bus master arbitration. If ARB_DIS |
592 | * is not available, processor caches must be |
593 | * flushed before C3 (ACPI 4.0, section 8.2). |
594 | */ |
595 | if (reg != 0 && len != 0) { |
596 | sc->sc_flags |= ACPICPU_FLAG_C_ARB; |
597 | return; |
598 | } |
599 | |
600 | /* |
601 | * Disable C3 entirely if WBINVD is not present. |
602 | */ |
603 | if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0) |
604 | sc->sc_cstate[ACPI_STATE_C3].cs_method = 0; |
605 | else { |
606 | /* |
607 | * If WBINVD is present and functioning properly, |
608 | * flush all processor caches before entering C3. |
609 | */ |
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) != 0)
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
		else
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
614 | } |
615 | } |
616 | |
617 | static int |
618 | acpicpu_cstate_latency(struct acpicpu_softc *sc) |
619 | { |
620 | static const uint32_t cs_factor = 3; |
621 | struct acpicpu_cstate *cs; |
622 | int i; |
623 | |
624 | KASSERT(mutex_owned(&sc->sc_mtx) != 0); |
625 | |
626 | for (i = cs_state_max; i > 0; i--) { |
627 | |
628 | cs = &sc->sc_cstate[i]; |
629 | |
630 | if (__predict_false(cs->cs_method == 0)) |
631 | continue; |
632 | |
633 | /* |
634 | * Choose a state if we have previously slept |
635 | * longer than the worst case latency of the |
636 | * state times an arbitrary multiplier. |
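		 *
		 * For example, with the multiplier cs_factor = 3, a
		 * state with a worst case latency of 100 us is only
		 * chosen if the previous sleep lasted over 300 us.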
637 | */ |
638 | if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor) |
639 | return i; |
640 | } |
641 | |
642 | return ACPI_STATE_C1; |
643 | } |
644 | |
645 | /* |
646 | * The main idle loop. |
647 | */ |
648 | void |
649 | acpicpu_cstate_idle(void) |
650 | { |
651 | struct cpu_info *ci = curcpu(); |
652 | struct acpicpu_softc *sc; |
653 | int state; |
654 | |
655 | KASSERT(acpicpu_sc != NULL); |
656 | KASSERT(ci->ci_acpiid < maxcpus); |
657 | |
658 | sc = acpicpu_sc[ci->ci_acpiid]; |
659 | |
660 | if (__predict_false(sc == NULL)) |
661 | return; |
662 | |
663 | KASSERT(ci->ci_ilevel == IPL_NONE); |
664 | KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0); |
665 | |
666 | if (__predict_false(sc->sc_cold != false)) |
667 | return; |
668 | |
669 | if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0)) |
670 | return; |
671 | |
672 | state = acpicpu_cstate_latency(sc); |
673 | mutex_exit(&sc->sc_mtx); |
674 | |
675 | /* |
676 | * Apply AMD C1E quirk. |
677 | */ |
678 | if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0) |
679 | acpicpu_md_quirk_c1e(); |
680 | |
681 | /* |
	 * Check for bus master activity. Note that usb(4) in
	 * particular causes high activity, which may prevent
	 * the use of C3 states.
684 | */ |
685 | if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) { |
686 | |
687 | if (acpicpu_cstate_bm_check() != false) |
688 | state--; |
689 | |
690 | if (__predict_false(sc->sc_cstate[state].cs_method == 0)) |
691 | state = ACPI_STATE_C1; |
692 | } |
693 | |
694 | KASSERT(state != ACPI_STATE_C0); |
695 | |
696 | if (state != ACPI_STATE_C3) { |
697 | acpicpu_cstate_idle_enter(sc, state); |
698 | return; |
699 | } |
700 | |
701 | /* |
	 * On all recent (Intel) CPUs the caches are shared
	 * between CPUs, and bus master control is required
	 * to keep them coherent while in C3. Flushing the
	 * CPU caches is only a last resort.
706 | */ |
707 | if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0) |
708 | ACPI_FLUSH_CPU_CACHE(); |
709 | |
710 | /* |
711 | * Allow the bus master to request that any given |
712 | * CPU should return immediately to C0 from C3. |
713 | */ |
714 | if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0) |
715 | (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); |
716 | |
717 | /* |
718 | * It may be necessary to disable bus master arbitration |
719 | * to ensure that bus master cycles do not occur while |
720 | * sleeping in C3 (see ACPI 4.0, section 8.1.4). |
721 | */ |
722 | if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0) |
723 | (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); |
724 | |
725 | acpicpu_cstate_idle_enter(sc, state); |
726 | |
727 | /* |
728 | * Disable bus master wake and re-enable the arbiter. |
729 | */ |
730 | if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0) |
731 | (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); |
732 | |
733 | if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0) |
734 | (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); |
735 | } |
736 | |
737 | static void |
738 | acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state) |
739 | { |
740 | struct acpicpu_cstate *cs = &sc->sc_cstate[state]; |
741 | uint32_t end, start, val; |
742 | |
743 | start = acpitimer_read_fast(NULL); |
744 | |
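	/*
	 * With the SYSIO method the chipset initiates the
	 * transition when the P_LVLx register is read; the
	 * value itself is irrelevant. The FFH and HALT
	 * methods enter the state in the MD code.
	 */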
745 | switch (cs->cs_method) { |
746 | |
747 | case ACPICPU_C_STATE_FFH: |
748 | case ACPICPU_C_STATE_HALT: |
749 | acpicpu_md_cstate_enter(cs->cs_method, state); |
750 | break; |
751 | |
752 | case ACPICPU_C_STATE_SYSIO: |
753 | (void)AcpiOsReadPort(cs->cs_addr, &val, 8); |
754 | break; |
755 | } |
756 | |
757 | cs->cs_evcnt.ev_count++; |
758 | end = acpitimer_read_fast(NULL); |
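	/*
	 * Keep track of the approximate length of the previous
	 * sleep; acpicpu_cstate_latency() uses this to choose
	 * the next state.
	 */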
759 | sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000; |
760 | } |
761 | |
762 | static bool |
763 | acpicpu_cstate_bm_check(void) |
764 | { |
765 | uint32_t val = 0; |
766 | ACPI_STATUS rv; |
767 | |
768 | rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val); |
769 | |
770 | if (ACPI_FAILURE(rv) || val == 0) |
771 | return false; |
772 | |
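	/*
	 * The BM_STS bit is sticky; writing a one to
	 * it clears the bit for the next check.
	 */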
773 | (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); |
774 | |
775 | return true; |
776 | } |
777 | |