From patchwork Sat Dec 18 11:30:17 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: SMM for AMD K8 - next attempt
Date: Sat, 18 Dec 2010 11:30:17 -0000
From: Rudolf Marek
X-Patchwork-Id: 2439
Message-Id: <4D0C9B49.3040806@assembler.cz>
To: Stefan Reinauer
Cc: coreboot@coreboot.org

Hi,

OK, here are fixes on top of your patch to make it work again ;)

It seems you missed my change of bit 6 - any idea why? (See the dispatch
sketch after the patch for why the table index matters.)

+	southbridge_smi_cmd,	// [6]

We don't need Aclose either - that is for access to video RAM, for example
(but our data can reside in Aseg, so we don't want this).

I tested this on a single core and it works. The question is whether it
works on dual-core as well.

Signed-off-by: Rudolf Marek

Thanks,
Rudolf

Index: coreboot/src/cpu/amd/smm/smm_init.c
===================================================================
--- coreboot.orig/src/cpu/amd/smm/smm_init.c	2010-12-18 11:09:50.000000000 +0100
+++ coreboot/src/cpu/amd/smm/smm_init.c	2010-12-18 11:24:11.000000000 +0100
@@ -57,13 +57,13 @@
 
 		smm_handler_copied = 1;
 
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
 		/* Back up MSRs for later restore */
 		syscfg_orig = rdmsr(SYSCFG_MSR);
 		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
 
+		/* MTRR changes don't like an enabled cache */
+		disable_cache();
+
 		msr = syscfg_orig;
 		/* Allow changes to MTRR extended attributes */
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
@@ -78,60 +78,46 @@
 		msr.lo = 0x18181818;
 		msr.hi = 0x18181818;
 		wrmsr(MTRRfix16K_A0000_MSR, msr);
-		enable_cache();
 
-		/* disable the extended features */
+		/* enable the extended features */
 		msr = syscfg_orig;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
 		wrmsr(SYSCFG_MSR, msr);
 
-		/* enable the SMM memory window */
-		// TODO does "Enable ASEG SMRAM Range" have to happen on
-		// every CPU core?
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-		msr.lo &= ~(1 << 2); // Open ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
+		enable_cache();
 		/* copy the real SMM handler */
 		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 		wbinvd();
 
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= ~(1 << 2); // Close ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
-		/* Change SYSCFG so we can restore the MTRR */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
 		/* Restore MTRR */
 		disable_cache();
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 
 		/* Restore SYSCFG */
 		wrmsr(SYSCFG_MSR, syscfg_orig);
+
+		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 		enable_cache();
 	}
 
+
 	/* But set SMM base address on all CPUs/cores */
 	msr = rdmsr(SMM_BASE_MSR);
 	msr.lo = SMM_BASE - (lapicid() * 0x400);
 	wrmsr(SMM_BASE_MSR, msr);
-}
 
-void smm_lock(void)
-{
-	// TODO I think this should be running on each CPU
-	msr_t msr;
-
-	printk(BIOS_DEBUG, "Locking SMM.\n");
+	/* enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
+	wrmsr(SMM_MASK_MSR, msr);
 
 	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
 }
+
+void smm_lock(void)
+{
+	/* We lock SMM per CPU core */
+}
Index: coreboot/src/southbridge/via/vt8237r/smihandler.c
===================================================================
--- coreboot.orig/src/southbridge/via/vt8237r/smihandler.c	2010-12-18 11:34:15.000000000 +0100
+++ coreboot/src/southbridge/via/vt8237r/smihandler.c	2010-12-18 11:36:53.000000000 +0100
@@ -208,9 +208,9 @@
 	NULL,			// [1]
 	NULL,			// [2]
 	NULL,			// [3]
-	southbridge_smi_cmd,	// [4]
+	NULL,			// [4]
 	NULL,			// [5]
-	NULL,			// [6]
+	southbridge_smi_cmd,	// [6]
 	NULL,			// [7]
 	NULL,			// [8]
 	NULL,			// [9]
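
A note on the table index above, assuming the southbridge_smi[] table is
indexed by the SMI source's bit number in the chipset's SMI status register
(which is what the "bit6" remark suggests): if the VT8237R reports the SMI
command (APM) source on bit 6, the handler has to sit at [6], not [4]. The
standalone sketch below only illustrates that dispatch idea; the type and
function names are invented for the example, and the real coreboot
dispatcher passes extra arguments to each handler, so do not read it as the
actual smihandler.c code.

#include <stdio.h>
#include <stdint.h>

typedef void (*smi_handler_t)(void);

static void southbridge_smi_cmd(void)
{
	printf("APM/SMI command handled\n");
}

/* The index into this table equals the bit number in the SMI status
 * register; with the command SMI reported on bit 6, the handler must
 * sit at [6], not [4]. */
static const smi_handler_t southbridge_smi[32] = {
	[6] = southbridge_smi_cmd,
};

static void dispatch_smi(uint32_t smi_status)
{
	for (int bit = 0; bit < 32; bit++) {
		if ((smi_status & (1u << bit)) && southbridge_smi[bit])
			southbridge_smi[bit]();
	}
}

int main(void)
{
	dispatch_smi(1u << 6);	/* simulate an SMI with only status bit 6 set */
	return 0;
}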
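
On the dual-core question: smm_init() programs SMM_BASE_MSR on every core
to SMM_BASE - lapicid() * 0x400, so each core ends up with its own SMBASE
and therefore its own state-save area. A rough sketch of the resulting
layout, assuming SMM_BASE is the A-segment base 0xA0000 and using the
architectural offsets from the AMD64 manuals (SMI entry point at
SMBASE + 0x8000, 512-byte state-save area starting at SMBASE + 0xFE00) -
none of these figures are taken from the patch itself:

#include <stdio.h>

#define SMM_BASE 0xA0000u	/* assumed A-segment base, not from the patch */

int main(void)
{
	/* Two cores, matching the dual-core case asked about above. */
	for (unsigned int lapicid = 0; lapicid < 2; lapicid++) {
		unsigned int smbase = SMM_BASE - lapicid * 0x400;

		printf("lapicid %u: SMBASE 0x%05x, entry 0x%05x, save area 0x%05x-0x%05x\n",
		       lapicid,
		       smbase,
		       smbase + 0x8000,		/* architectural SMI entry point */
		       smbase + 0xFE00,		/* start of the AMD64 state-save area */
		       smbase + 0xFFFF);	/* end of the 512-byte save area */
	}
	return 0;
}

With that spacing the per-core save areas sit 0x400 apart and never
overlap, which is presumably the reason for that particular multiplier.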