ACPICA: minimal patch to integrate new tables into Linux

Convert drivers/acpi/processor_idle.c from the old acpi_fadt bit-field
members (tmr_val_ext, plvl2_up, wb_invd, plvl2_lat, plvl3_lat, cst_cnt,
smi_cmd, xpm_tmr_blk) to the new ACPICA FADT layout, which exposes the
feature bits through the flags word (ACPI_FADT_32BIT_TIMER,
ACPI_FADT_C2_MP_SUPPORTED, ACPI_FADT_WBINVD) and renames the remaining
fields (C2latency, C3latency, cst_control, smi_command, xpm_timer_block).

Signed-off-by: Len Brown <len.brown@intel.com>
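---

The hunks below all follow the same pattern: FADT feature bits that the
old layout exposed as individual bit-field members (tmr_val_ext,
plvl2_up, wb_invd) are now tested against the flags word, and several
members are renamed. As a rough user-space sketch of the flag-based
check adopted in the first hunk, the program below reproduces the
ticks_elapsed() wrap-around logic against a mock FADT; mock_fadt and
MOCK_FADT_32BIT_TIMER are stand-ins invented for this example, not the
real ACPICA definitions.

/*
 * Illustrative sketch only -- not kernel code.  It mimics the
 * ticks_elapsed() logic patched below, using a mock FADT so the
 * flag-based check (flags & ...32BIT_TIMER) can be exercised in
 * user space.
 */
#include <stdio.h>
#include <stdint.h>

#define MOCK_FADT_32BIT_TIMER (1 << 8)	/* stand-in for ACPI_FADT_32BIT_TIMER */

struct mock_fadt {
	uint32_t flags;			/* feature bits, as in the new layout */
};

static struct mock_fadt mock_fadt;

/* PM timer delta: the counter is 24 bits wide unless the 32-bit flag is set. */
static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2)
{
	if (t2 >= t1)
		return t2 - t1;
	else if (!(mock_fadt.flags & MOCK_FADT_32BIT_TIMER))
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	else
		return (0xFFFFFFFF - t1) + t2;
}

int main(void)
{
	/* 24-bit timer: counter wraps at 0x00FFFFFF */
	mock_fadt.flags = 0;
	printf("24-bit wrap: %u ticks\n", ticks_elapsed(0x00FFFFF0, 0x10));

	/* 32-bit timer: counter wraps at 0xFFFFFFFF */
	mock_fadt.flags = MOCK_FADT_32BIT_TIMER;
	printf("32-bit wrap: %u ticks\n", ticks_elapsed(0xFFFFFFF0, 0x10));
	return 0;
}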
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3f30af2..9fa3d39 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -160,7 +160,7 @@
{
if (t2 >= t1)
return (t2 - t1);
- else if (!acpi_fadt.tmr_val_ext)
+ else if (!(acpi_fadt.flags & ACPI_FADT_32BIT_TIMER))
return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
else
return ((0xFFFFFFFF - t1) + t2);
@@ -236,7 +236,7 @@
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
- unused = inl(acpi_fadt.xpm_tmr_blk.address);
+ unused = inl(acpi_fadt.xpm_timer_block.address);
}
}
@@ -338,7 +338,7 @@
* detection phase, to work cleanly with logical CPU hotplug.
*/
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !acpi_fadt.plvl2_up)
+ !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
cx = &pr->power.states[ACPI_STATE_C1];
#endif
@@ -384,11 +384,11 @@
case ACPI_STATE_C2:
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t1 = inl(acpi_fadt.xpm_timer_block.address);
/* Invoke C2 */
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t2 = inl(acpi_fadt.xpm_timer_block.address);
#ifdef CONFIG_GENERIC_TIME
/* TSC halts in C2, so notify users */
@@ -420,11 +420,11 @@
}
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t1 = inl(acpi_fadt.xpm_timer_block.address);
/* Invoke C3 */
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t2 = inl(acpi_fadt.xpm_timer_block.address);
if (pr->flags.bm_check) {
/* Enable bus master arbitration */
atomic_dec(&c3_cpu_count);
@@ -457,7 +457,7 @@
#ifdef CONFIG_HOTPLUG_CPU
/* Don't do promotion/demotion */
if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !acpi_fadt.plvl2_up) {
+ !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
next_state = cx;
goto end;
}
@@ -627,7 +627,8 @@
* Check for P_LVL2_UP flag before entering C2 and above on
* an SMP system.
*/
- if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
+ if ((num_online_cpus() > 1) &&
+ !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
@@ -636,8 +637,8 @@
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
- pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
- pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;
+ pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.C2latency;
+ pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.C3latency;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -883,7 +884,7 @@
* WBINVD should be set in fadt, for C3 state to be
* supported on when bm_check is not required.
*/
- if (acpi_fadt.wb_invd != 1) {
+ if (!(acpi_fadt.flags & ACPI_FADT_WBINVD)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n"));
@@ -1164,9 +1165,9 @@
if (!pr)
return -EINVAL;
- if (acpi_fadt.cst_cnt && !nocst) {
+ if (acpi_fadt.cst_control && !nocst) {
status =
- acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
+ acpi_os_write_port(acpi_fadt.smi_command, acpi_fadt.cst_control, 8);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Notifying BIOS of _CST ability failed"));