From fd2cdf206d10d05353b852174ab39f9ae42084cf Mon Sep 17 00:00:00 2001 From: Patrick Rudolph Date: Fri, 30 Jan 2026 15:19:59 +0100 Subject: [PATCH] cpu/intel/smm/gen1: Optimize cpu_has_alternative_smrr For most targets it's known if the CPU supports alternative SMRR registers or not. Only on model_6fx runtime detection is necessary. On all platforms this allows the compiler to optimize the code and thus shrink the code size if alternative SMRRs aren't supported. TEST=On Lenovo X220 the ramstage is 308 bytes smaller. Change-Id: I3a965d142f79ad587b8cedc9b4646b05e2a45f8b Signed-off-by: Patrick Rudolph Reviewed-on: https://review.coreboot.org/c/coreboot/+/91014 Reviewed-by: Paul Menzel Tested-by: build bot (Jenkins) Reviewed-by: Angel Pons --- src/cpu/intel/model_1067x/mp_init.c | 2 +- src/cpu/intel/smm/gen1/smmrelocate.c | 19 ++++++++----------- src/include/cpu/intel/smm_reloc.h | 2 -- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/cpu/intel/model_1067x/mp_init.c b/src/cpu/intel/model_1067x/mp_init.c index bc53214310..8f04bf9ccc 100644 --- a/src/cpu/intel/model_1067x/mp_init.c +++ b/src/cpu/intel/model_1067x/mp_init.c @@ -48,7 +48,7 @@ static void pre_mp_smm_init(void) static void per_cpu_smm_trigger(void) { msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR); - if (cpu_has_alternative_smrr() && mtrr_cap.lo & SMRR_SUPPORTED) { + if (mtrr_cap.lo & SMRR_SUPPORTED) { set_feature_ctrl_vmx(); msr_t ia32_ft_ctrl = rdmsr(IA32_FEATURE_CONTROL); /* We don't care if the lock is already setting diff --git a/src/cpu/intel/smm/gen1/smmrelocate.c b/src/cpu/intel/smm/gen1/smmrelocate.c index c57a661ef7..6f3806dede 100644 --- a/src/cpu/intel/smm/gen1/smmrelocate.c +++ b/src/cpu/intel/smm/gen1/smmrelocate.c @@ -28,20 +28,17 @@ /* On model_6fx, model_1067x and model_106cx SMRR functions slightly differently. The MSR are at different location from the rest and need to be explicitly enabled in IA32_FEATURE_CONTROL MSR. 
*/ -bool cpu_has_alternative_smrr(void) +static inline bool cpu_has_alternative_smrr(void) { + if (CONFIG(CPU_INTEL_MODEL_1067X) || + CONFIG(CPU_INTEL_MODEL_106CX)) + return true; + if (!CONFIG(CPU_INTEL_MODEL_6FX)) + return false; + /* Runtime detection as model_6fx also supports Fam 6 Model 16h */ struct cpuinfo_x86 c; get_fms(&c, cpuid_eax(1)); - if (c.x86 != 6) - return false; - switch (c.x86_model) { - case 0xf: - case 0x17: /* core2 */ - case 0x1c: /* Bonnell */ - return true; - default: - return false; - } + return c.x86 == 6 && c.x86_model == 0xf; } static void write_smrr_alt(struct smm_relocation_params *relo_params) diff --git a/src/include/cpu/intel/smm_reloc.h b/src/include/cpu/intel/smm_reloc.h index 6b1a525d6c..2b25c75705 100644 --- a/src/include/cpu/intel/smm_reloc.h +++ b/src/include/cpu/intel/smm_reloc.h @@ -51,8 +51,6 @@ void smm_initialize(void); void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size); void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase); -bool cpu_has_alternative_smrr(void); - #define MSR_PRMRR_PHYS_BASE 0x1f4 #define MSR_PRMRR_PHYS_MASK 0x1f5 #define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4