arch/x86: Unify GDT entries

Currently there are 3 GDTs (Global Descriptor Tables) being used on x86:
- preRAM (gdt_init.S)
- SMM (smm_stub.S)
- RAM (c_start.S)

They have different layouts and thus different offsets for the segments
being used in assembly code. Stop using different GDT segments and
ensure that for ROM (preRAM + SMM) and RAM (ramstage) the segments
match. RAM will have additional entries not found in the preRAM GDT,
match. RAM will have additional entries, not found in pre RAM GDT,
but the segments for protected mode and 64-bit mode now match in
all stages.

This allows using the same defines in all stages. It also drops the
need to know in which stage the code is compiled, and it's no longer
necessary to switch the code segment between stages.

While at it, fix the comments in the ramstage GDT and drop unused
declarations from header files, always set the accessed bit and drop
GDT_CODE_ACPI_SEG.

Change-Id: I208496e6e4cc82833636f4f42503b44b0d702b9e
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/87255
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Shuo Liu <shuo.liu@intel.com>
Reviewed-by: Maximilian Brune <maximilian.brune@9elements.com>
This commit is contained in:
Patrick Rudolph 2025-04-10 08:51:55 +02:00 committed by Matt DeVillier
commit a6be271e63
14 changed files with 89 additions and 135 deletions

View file

@ -17,12 +17,8 @@
#endif
#endif
#include <cpu/x86/gdt.h>
#include <cpu/x86/msr.h>
#if defined(__RAMSTAGE__)
#include <arch/ram_segs.h>
#else
#include <arch/rom_segs.h>
#endif
.macro setup_longmode page_table
/* Get page table address */
@ -48,12 +44,8 @@
movl %eax, %cr0
/* use long jump to switch to 64-bit code segment */
#if defined(__RAMSTAGE__)
ljmp $RAM_CODE_SEG64, $jmp_addr\@
#else
ljmp $ROM_CODE_SEG64, $jmp_addr\@
ljmp $GDT_CODE_SEG64, $jmp_addr\@
#endif
.code64
jmp_addr\@:
.endm

View file

@ -10,17 +10,9 @@
*/
.code64
#include <cpu/x86/gdt.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/cr.h>
#if defined(__RAMSTAGE__)
#include <arch/ram_segs.h>
#define CODE_SEG RAM_CODE_SEG
#define DATA_SEG RAM_DATA_SEG
#else
#include <arch/rom_segs.h>
#define CODE_SEG ROM_CODE_SEG
#define DATA_SEG ROM_DATA_SEG
#endif
drop_longmode:
#if !ENV_CACHE_AS_RAM
@ -28,7 +20,7 @@ drop_longmode:
wbinvd
#endif
/* Set 32-bit code segment and ss */
mov $CODE_SEG, %rcx
mov $GDT_CODE_SEG, %rcx
/* SetCodeSelector32 will drop us to protected mode on return */
call SetCodeSelector32
@ -63,7 +55,7 @@ __longmode_compatibility:
/* Running in 32-bit compatibility mode */
/* Use flat data segment */
movl $DATA_SEG, %eax
movl $GDT_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss

View file

@ -27,7 +27,7 @@
/* Start code to put an i386 or later processor into 32-bit protected mode.
*/
#include <arch/rom_segs.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/post_code.h>
.section .init._start, "ax", @progbits
@ -136,7 +136,7 @@ _start16bit:
movl %ebp, %eax
/* Now that we are in protected mode jump to a 32 bit code segment. */
ljmpl $ROM_CODE_SEG, $bootblock_protected_mode_entry
ljmpl $GDT_CODE_SEG, $bootblock_protected_mode_entry
/**
* The gdt is defined in gdt_init.S, it has a 4 Gb code segment

View file

@ -11,7 +11,7 @@
*
*/
#include <arch/rom_segs.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/cr.h>
#include <cpu/x86/post_code.h>
@ -33,7 +33,7 @@ bootblock_protected_mode_entry:
post_code(POSTCODE_ENTER_PROTECTED_MODE)
movw $ROM_DATA_SEG, %ax
movw $GDT_DATA_SEG, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss

View file

@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cpu/x86/cr.h>
#include <cpu/x86/gdt.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>
#include <arch/ram_segs.h>
#define __RAMSTAGE__
#include <cpu/x86/64bit/entry64.inc>
@ -77,10 +77,10 @@ _start:
orl $CR0_SET_FLAGS, %eax
movl %eax, %cr0
ljmpl $RAM_CODE_SEG, $1f
ljmpl $GDT_CODE_SEG, $1f
1:
.code32
movw $RAM_DATA_SEG, %ax
movw $GDT_DATA_SEG, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss

View file

@ -9,8 +9,8 @@
* found in smm.h.
*/
#include <arch/rom_segs.h>
#include <cpu/x86/cr.h>
#include <cpu/x86/gdt.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/lapic_def.h>
#include <cpu/x86/64bit/entry64.inc>
@ -94,7 +94,7 @@ untampered_lapic:
movl %eax, %cr0
/* Enable protected mode */
ljmpl $ROM_CODE_SEG, $smm_trampoline32
ljmpl $GDT_CODE_SEG, $smm_trampoline32
.align 4
smm_relocate_gdt:
@ -125,7 +125,7 @@ smm_relocate_gdt_end:
.global smm_trampoline32
smm_trampoline32:
/* Use flat data segment */
movw $ROM_DATA_SEG, %ax
movw $GDT_DATA_SEG, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss