From ee9b0fa199a25838e08a0c7ce351af3e88bda9b0 Mon Sep 17 00:00:00 2001 From: Subrata Banik Date: Sat, 5 Apr 2025 08:08:52 +0000 Subject: [PATCH] cpu/x86: Conditionally invalidate caches based on self-snooping support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code currently unconditionally flushes or invalidates the entire cache (using `clflush_region` or `wbinvd`) after loading the SIPI vector for APs and after loading SMM handlers. This commit modifies this behavior to only perform these cache operations if the CPU does *not* support self-snooping. Self-snooping CPUs can maintain cache coherency within the core/complex more efficiently. A CPU with self-snoop enabled does not necessarily need to perform wbinvd to ensure data written to the cache is reflected in main memory. Self-snooping CPUs employ a write-back caching policy, combined with a cache coherence protocol, to manage data writes and ensure consistency between cache and main memory. When the BSP writes the SIPI vector or SMM handlers to memory, other units within the same CPU that might be caching these regions should be aware of the updates through the self-snooping mechanism. A full cache flush or invalidate to ensure cache contents reach main memory might be unnecessary and could negatively impact performance. By conditionally performing these cache operations based on `self_snooping_supported()`, we can optimize the boot process for CPUs that have advanced cache coherency features while maintaining correct behavior on older or simpler CPUs. TEST=Boot google/rex, brox and fatcat with this patch. Able to reduce boot time by ~19-25ms. 
Change-Id: If32439752d0ceaa03b1d81873ea0bc562092e9d5 Signed-off-by: Subrata Banik Reviewed-on: https://review.coreboot.org/c/coreboot/+/87182 Reviewed-by: Shuo Liu Reviewed-by: Jérémy Compostella Reviewed-by: Kapil Porwal Tested-by: build bot (Jenkins) --- src/cpu/x86/cache/cache.c | 10 ++++++---- src/cpu/x86/mp_init.c | 20 ++++++++++++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/cpu/x86/cache/cache.c b/src/cpu/x86/cache/cache.c index 11524e65db..48718d2ae4 100644 --- a/src/cpu/x86/cache/cache.c +++ b/src/cpu/x86/cache/cache.c @@ -49,8 +49,10 @@ void arch_segment_loaded(uintptr_t start, size_t size, int flags) if (!cbmem_online()) return; - if (clflush_supported()) - clflush_region(start, size); - else - printk(BIOS_DEBUG, "Not flushing cache to RAM, CLFLUSH not supported\n"); + if (!self_snooping_supported()) { + if (clflush_supported()) + clflush_region(start, size); + else + printk(BIOS_DEBUG, "Not flushing cache to RAM, CLFLUSH not supported\n"); + } } diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c index 574f28e379..f223ccc6e5 100644 --- a/src/cpu/x86/mp_init.c +++ b/src/cpu/x86/mp_init.c @@ -365,12 +365,14 @@ static atomic_t *load_sipi_vector(struct mp_params *mp_params) ap_count = &sp->ap_count; atomic_set(ap_count, 0); - /* Make sure SIPI data hits RAM so the APs that come up will see the - startup code even if the caches are disabled. */ - if (clflush_supported()) - clflush_region((uintptr_t)mod_loc, module_size); - else - wbinvd(); + if (!self_snooping_supported()) { + /* Make sure SIPI data hits RAM so the APs that come up will see the + startup code even if the caches are disabled. */ + if (clflush_supported()) + clflush_region((uintptr_t)mod_loc, module_size); + else + wbinvd(); + } return ap_count; } @@ -826,8 +828,10 @@ static void load_smm_handlers(void) smm_disable(); } - /* Ensure the SMM handlers hit DRAM before performing first SMI. 
*/ - wbinvd(); + if (!self_snooping_supported()) { + /* Ensure the SMM handlers hit DRAM before performing first SMI. */ + wbinvd(); + } /* * Indicate that the SMM handlers have been loaded and MP