soc/intel/pantherlake: Add FSP-M programming

FSP-M UPDs are programmed according to the configuration (Kconfig and
device tree).

BUG=348678529
TEST=Memory is initialized successfully and hardware is programmed as
     desired on Intel pantherlake reference board.

Change-Id: Iea26d962748116fa84afdb4afcba1098a64b6988
Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/84443
Reviewed-by: Subrata Banik <subratabanik@google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
This commit is contained in:
Jeremy Compostella 2024-09-25 10:04:36 -07:00 committed by Subrata Banik
commit be5745f79f
3 changed files with 547 additions and 4 deletions

View file

@ -8,9 +8,15 @@
#include <types.h>
/* Memory technologies supported by this SoC's memory initialization code. */
enum mem_type {
	MEM_TYPE_DDR5,
	MEM_TYPE_LP5X,
};
/* DDR5-specific mainboard memory configuration. */
struct mem_ddr_config {
	/* DQ pins interleaved setting (maps to the DqPinsInterleaved UPD). Enable/Disable control. */
	bool dq_pins_interleaved;
};
struct lpx_dq {
uint8_t dq0[BITS_PER_BYTE];
uint8_t dq1[BITS_PER_BYTE];
@ -83,7 +89,10 @@ struct mb_cfg {
struct lpx_dqs_map lpx_dqs_map;
};
struct mem_lp5x_config lp5x_config;
union {
struct mem_lp5x_config lp5x_config;
struct mem_ddr_config ddr_config;
};
/* Early Command Training Enable/Disable Control */
bool ect;

View file

@ -2,10 +2,221 @@
#include <fsp/util.h>
#include <soc/meminit.h>
#include <string.h>
#define LP5_PHYSICAL_CH_WIDTH 16
#define LP5_CHANNELS CHANNEL_COUNT(LP5_PHYSICAL_CH_WIDTH)
#define DDR5_PHYSICAL_CH_WIDTH 32
#define DDR5_CHANNELS CHANNEL_COUNT(DDR5_PHYSICAL_CH_WIDTH)
/*
 * Program the RCOMP (resistive compensation) UPDs from the mainboard
 * configuration. A zero value in mb_cfg means "not provided" and leaves
 * the corresponding FSP default untouched.
 */
static void set_rcomp_config(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg)
{
	size_t idx;

	if (mb_cfg->rcomp.resistor != 0)
		mem_cfg->RcompResistor = mb_cfg->rcomp.resistor;

	/* Only non-zero targets override the FSP defaults. */
	for (idx = 0; idx < ARRAY_SIZE(mem_cfg->RcompTarget); idx++) {
		if (mb_cfg->rcomp.targets[idx] != 0)
			mem_cfg->RcompTarget[idx] = mb_cfg->rcomp.targets[idx];
	}
}
/* Apply LPDDR5/x-specific FSP-M settings from the board configuration. */
static void meminit_lp5x(FSP_M_CONFIG *mem_cfg, const struct mem_lp5x_config *lp5x_config)
{
	/* Board-provided Command/Control/Clock configuration for LP5x. */
	mem_cfg->Lp5CccConfig = lp5x_config->ccc_config;
	/* DQ pin interleaving is not used for LP5x. */
	mem_cfg->DqPinsInterleaved = 0;
}
/* Apply DDR5-specific FSP-M settings from the board configuration. */
static void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct mem_ddr_config *ddr_config)
{
	/* Forward the board's DQ pin interleaving choice to the MRC. */
	mem_cfg->DqPinsInterleaved = ddr_config->dq_pins_interleaved;
}
/*
 * Per-memory-technology channel description used by the common memory-init
 * code: number of physical channels, the physical-to-MRC channel index
 * mapping, and which physical channels are populated in half-populated or
 * mixed topologies.
 */
static const struct soc_mem_cfg soc_mem_cfg[] = {
	[MEM_TYPE_DDR5] = {
		.num_phys_channels = DDR5_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 4,
			[3] = 5,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0 and 1 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1),
			/* In mixed topology, channels 2 and 3 are always memory-down. */
			.mixed_topo = BIT(2) | BIT(3),
		},
	},
	[MEM_TYPE_LP5X] = {
		.num_phys_channels = LP5_CHANNELS,
		/* LP5x physical channels map 1:1 onto MRC channels. */
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 2,
			[3] = 3,
			[4] = 4,
			[5] = 5,
			[6] = 6,
			[7] = 7,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0, 1, 2 and 3 are populated in case
			 * of half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
			/* LP5x does not support mixed topology. */
		},
	},
};
/*
 * Copy the per-channel/per-DIMM SPD data pointers into the MemorySpdPtr*
 * UPDs and disable every MRC channel that ends up with no SPD data at all.
 */
static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
{
	uint64_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
		[0] = { &mem_cfg->MemorySpdPtr000, &mem_cfg->MemorySpdPtr001, },
		[1] = { &mem_cfg->MemorySpdPtr010, &mem_cfg->MemorySpdPtr011, },
		[2] = { &mem_cfg->MemorySpdPtr020, &mem_cfg->MemorySpdPtr021, },
		[3] = { &mem_cfg->MemorySpdPtr030, &mem_cfg->MemorySpdPtr031, },
		[4] = { &mem_cfg->MemorySpdPtr100, &mem_cfg->MemorySpdPtr101, },
		[5] = { &mem_cfg->MemorySpdPtr110, &mem_cfg->MemorySpdPtr111, },
		[6] = { &mem_cfg->MemorySpdPtr120, &mem_cfg->MemorySpdPtr121, },
		[7] = { &mem_cfg->MemorySpdPtr130, &mem_cfg->MemorySpdPtr131, },
	};
	uint8_t *disable_channel_upds[MRC_CHANNELS] = {
		&mem_cfg->DisableMc0Ch0,
		&mem_cfg->DisableMc0Ch1,
		&mem_cfg->DisableMc0Ch2,
		&mem_cfg->DisableMc0Ch3,
		&mem_cfg->DisableMc1Ch0,
		&mem_cfg->DisableMc1Ch1,
		&mem_cfg->DisableMc1Ch2,
		&mem_cfg->DisableMc1Ch3,
	};

	mem_cfg->MemorySpdDataLen = data->spd_len;

	for (size_t channel = 0; channel < MRC_CHANNELS; channel++) {
		bool has_spd = false;

		for (size_t slot = 0; slot < CONFIG_DIMMS_PER_CHANNEL; slot++) {
			uint64_t *spd_ptr = spd_upds[channel][slot];

			*spd_ptr = data->spd[channel][slot];
			if (*spd_ptr != 0)
				has_spd = true;
		}
		/* A channel without any SPD data is disabled. */
		*disable_channel_upds[channel] = !has_spd;
	}
}
/*
 * Populate per-channel DQ or DQS map UPDs.
 *
 * @upds:        per-MRC-channel destination UPD buffers, each upd_size bytes.
 * @map:         flat board-supplied map: MRC_CHANNELS entries of upd_size bytes.
 * @upd_size:    size in bytes of one channel's map entry.
 * @data:        channel population data from mem_populate_channel_data().
 * @auto_detect: when true, zero all UPDs so the MRC auto-detects the mapping.
 *
 * Unpopulated channels are zeroed as well. Fix: the original advanced a
 * 'const void *' with pointer arithmetic, which is a GNU extension and not
 * valid ISO C; walk the map with a byte pointer instead.
 */
static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
				 const struct mem_channel_data *data, bool auto_detect)
{
	const uint8_t *src = map;

	for (size_t i = 0; i < MRC_CHANNELS; i++, src += upd_size) {
		if (auto_detect ||
		    !channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
			memset(upds[i], 0, upd_size);
		else
			memcpy(upds[i], src, upd_size);
	}
}
/*
 * Fill the per-channel CPU-to-DRAM DQ map UPDs from the board's dq_map.
 * When auto_detect is set (or a channel is unpopulated) the UPDs are zeroed
 * so the MRC detects the mapping on its own.
 */
static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			     const struct mb_cfg *mb_cfg, bool auto_detect)
{
	const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramMc0Ch0);
	/* One destination UPD buffer per MRC channel, in MRC channel order. */
	void *dq_upds[MRC_CHANNELS] = {
		&mem_cfg->DqMapCpu2DramMc0Ch0,
		&mem_cfg->DqMapCpu2DramMc0Ch1,
		&mem_cfg->DqMapCpu2DramMc0Ch2,
		&mem_cfg->DqMapCpu2DramMc0Ch3,
		&mem_cfg->DqMapCpu2DramMc1Ch0,
		&mem_cfg->DqMapCpu2DramMc1Ch1,
		&mem_cfg->DqMapCpu2DramMc1Ch2,
		&mem_cfg->DqMapCpu2DramMc1Ch3,
	};
	/* The board's dq_map entries must match the UPD's per-channel size. */
	_Static_assert(sizeof(mem_cfg->DqMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH,
		       "Incorrect DQ UPD size!");
	mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data, auto_detect);
}
/*
 * Fill the per-channel CPU-to-DRAM DQS map UPDs from the board's dqs_map.
 * Mirrors mem_init_dq_upds() but for the (smaller) DQS strobe maps.
 */
static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			      const struct mb_cfg *mb_cfg, bool auto_detect)
{
	const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0);
	/* One destination UPD buffer per MRC channel, in MRC channel order. */
	void *dqs_upds[MRC_CHANNELS] = {
		&mem_cfg->DqsMapCpu2DramMc0Ch0,
		&mem_cfg->DqsMapCpu2DramMc0Ch1,
		&mem_cfg->DqsMapCpu2DramMc0Ch2,
		&mem_cfg->DqsMapCpu2DramMc0Ch3,
		&mem_cfg->DqsMapCpu2DramMc1Ch0,
		&mem_cfg->DqsMapCpu2DramMc1Ch1,
		&mem_cfg->DqsMapCpu2DramMc1Ch2,
		&mem_cfg->DqsMapCpu2DramMc1Ch3,
	};
	/* One DQS strobe per byte lane: UPD holds channel-width/8 entries. */
	_Static_assert(sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH / 8,
		       "Incorrect DQS UPD size!");
	mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data, auto_detect);
}
#define DDR5_CH_DIMM_OFFSET(ch, dimm) ((ch) * CONFIG_DIMMS_PER_CHANNEL + (dimm))
/*
 * For DDR5 DIMM modules, program the SMBus SPD address table (addresses are
 * shifted left by one, i.e. 8-bit form) and zero the DQ/DQS maps so the MRC
 * auto-detects them.
 */
static void ddr5_fill_dimm_module_info(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
				       const struct mem_spd *spd_info)
{
	const struct soc_mem_cfg *ddr5 = &soc_mem_cfg[MEM_TYPE_DDR5];

	for (size_t ch = 0; ch < ddr5->num_phys_channels; ch++) {
		/* The MRC channel index depends only on the physical channel. */
		const size_t mrc_ch = ddr5->phys_to_mrc_map[ch];

		for (size_t dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++)
			mem_cfg->SpdAddressTable[DDR5_CH_DIMM_OFFSET(mrc_ch, dimm)] =
				spd_info->smbus[ch].addr_dimm[dimm] << 1;
	}

	/* Request DQ/DQS auto-detection from the MRC. */
	mem_init_dq_upds(mem_cfg, NULL, mb_cfg, true);
	mem_init_dqs_upds(mem_cfg, NULL, mb_cfg, true);
}
/*
 * Program the FSP-M memory UPDs from the mainboard configuration.
 *
 * @memupd:         FSP-M UPD structure to fill.
 * @mb_cfg:         mainboard memory configuration (type, rcomp, DQ/DQS maps...).
 * @spd_info:       SPD source description (memory-down data or SMBus addresses).
 * @half_populated: true if only half of the physical channels are populated.
 */
void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
		 const struct mem_spd *spd_info, bool half_populated)
{
	/* TODO: Update this code after the FSP is released externally. */
	struct mem_channel_data data;
	bool dq_dqs_auto_detect = false;
	FSP_M_CONFIG *mem_cfg = &memupd->FspmConfig;

	mem_cfg->ECT = mb_cfg->ect;
	mem_cfg->UserBd = mb_cfg->user_bd;
	set_rcomp_config(mem_cfg, mb_cfg);

	switch (mb_cfg->type) {
	case MEM_TYPE_DDR5:
		meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
		/* DDR5 relies on MRC auto-detection of the DQ/DQS mapping. */
		dq_dqs_auto_detect = true;
		/*
		 * TODO: Drop this workaround once SMBus driver in coreboot is
		 * updated to support DDR5 EEPROM reading.
		 */
		if (spd_info->topo == MEM_TOPO_DIMM_MODULE) {
			ddr5_fill_dimm_module_info(mem_cfg, mb_cfg, spd_info);
			return;
		}
		break;
	case MEM_TYPE_LP5X:
		meminit_lp5x(mem_cfg, &mb_cfg->lp5x_config);
		break;
	default:
		die("Unsupported memory type(%d)\n", mb_cfg->type);
	}

	/* Resolve channel population and SPD pointers for this topology. */
	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info,
				  half_populated, &data);

	mem_init_spd_upds(mem_cfg, &data);
	mem_init_dq_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
	mem_init_dqs_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
}

View file

@ -1,10 +1,333 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cpu/intel/common/common.h>
#include <cpu/x86/msr.h>
#include <fsp/fsp_debug_event.h>
#include <fsp/util.h>
#include <intelblocks/cpulib.h>
#include <soc/iomap.h>
#include <soc/msr.h>
#include <soc/pcie.h>
#include <soc/romstage.h>
#include <soc/soc_info.h>
#define FSP_CLK_NOTUSED 0xff
#define FSP_CLK_LAN 0x70
#define FSP_CLK_FREE_RUNNING 0x80
/*
 * Program internal graphics (IGD) UPDs: stolen memory pre-allocation and the
 * per-DDI-port DDC/HPD enables taken from the devicetree ddi_ports_config.
 */
static void fill_fspm_igd_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	/* Pairs of DDC/HPD UPD pointers, indexed by DDI port. */
	const struct ddi_port_upds {
		uint8_t *ddc;
		uint8_t *hpd;
	} ddi_port_upds[] = {
		[DDI_PORT_A] = {&m_cfg->DdiPortADdc, &m_cfg->DdiPortAHpd},
		[DDI_PORT_B] = {&m_cfg->DdiPortBDdc, &m_cfg->DdiPortBHpd},
		[DDI_PORT_C] = {&m_cfg->DdiPortCDdc, &m_cfg->DdiPortCHpd},
		[DDI_PORT_1] = {&m_cfg->DdiPort1Ddc, &m_cfg->DdiPort1Hpd},
		[DDI_PORT_2] = {&m_cfg->DdiPort2Ddc, &m_cfg->DdiPort2Hpd},
		[DDI_PORT_3] = {&m_cfg->DdiPort3Ddc, &m_cfg->DdiPort3Hpd},
		[DDI_PORT_4] = {&m_cfg->DdiPort4Ddc, &m_cfg->DdiPort4Hpd},
	};
	/* IGD is active only if not disabled by Kconfig and enabled in devicetree. */
	m_cfg->InternalGraphics = !CONFIG(SOC_INTEL_DISABLE_IGD) && is_devfn_enabled(PCI_DEVFN_IGD);
	if (m_cfg->InternalGraphics) {
		/* IGD is enabled, set IGD stolen size to 128MB. */
		m_cfg->IgdDvmt50PreAlloc = IGD_SM_128MB;
		/* DP port config */
		m_cfg->DdiPortAConfig = config->ddi_port_A_config;
		m_cfg->DdiPortBConfig = config->ddi_port_B_config;
		for (size_t i = 0; i < ARRAY_SIZE(ddi_port_upds); i++) {
			*ddi_port_upds[i].ddc = !!(config->ddi_ports_config[i] &
						   DDI_ENABLE_DDC);
			*ddi_port_upds[i].hpd = !!(config->ddi_ports_config[i] &
						   DDI_ENABLE_HPD);
		}
		/* Disable memory bandwidth compression */
		m_cfg->MemoryBandwidthCompression = 0;
	} else {
		/* IGD is disabled, skip IGD init in FSP. */
		m_cfg->IgdDvmt50PreAlloc = 0;
		/* DP port config */
		m_cfg->DdiPortAConfig = 0;
		m_cfg->DdiPortBConfig = 0;
		/* Clear all DDC/HPD enables when IGD is off. */
		for (size_t i = 0; i < ARRAY_SIZE(ddi_port_upds); i++) {
			*ddi_port_upds[i].ddc = 0;
			*ddi_port_upds[i].hpd = 0;
		}
	}
}
/* Program MRC-related UPDs: SAGV, RMT and MRC fast boot. */
static void fill_fspm_mrc_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	m_cfg->SaGv = config->sagv;
	/*
	 * When SAGV is enabled, select the work-point mask: take the board's
	 * bitmap when provided, otherwise enable work points 0-3. Work points
	 * should be chosen after reviewing their power/performance impact.
	 */
	if (m_cfg->SaGv)
		m_cfg->SaGvWpMask = config->sagv_wp_bitmap ? config->sagv_wp_bitmap
							   : SAGV_POINTS_0_1_2_3;

	m_cfg->RMT = config->rmt;
	m_cfg->MrcFastBoot = 1;
}
/* Program CPU-related UPDs: ratio, PRMRR size, TSEG size, SMM relocation. */
static void fill_fspm_cpu_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	/*
	 * CPU ratio: honor a devicetree override, otherwise mirror the ratio
	 * currently programmed in the FLEX_RATIO MSR (bits 15:8).
	 */
	m_cfg->CpuRatio = config->cpu_ratio_override
		? config->cpu_ratio_override
		: ((rdmsr(MSR_FLEX_RATIO).lo >> 8) & 0xff);

	m_cfg->PrmrrSize = get_valid_prmrr_size();
	m_cfg->TsegSize = CONFIG_SMM_TSEG_SIZE;
	/* Do not let FSP perform SMM relocation. */
	m_cfg->SmmRelocationEnable = 0;
}
/* Program security-related UPDs. */
static void fill_fspm_security_params(FSP_M_CONFIG *m_cfg,
				      const struct soc_intel_pantherlake_config *config)
{
	/* Enable TME only when Kconfig selects it and the CPU supports it. */
	const bool tme_selected = CONFIG(INTEL_TME);

	m_cfg->TmeEnable = tme_selected && is_tme_supported();
}
/* Program debug UART UPDs; FSP must not re-initialize the console UART. */
static void fill_fspm_uart_params(FSP_M_CONFIG *m_cfg,
				  const struct soc_intel_pantherlake_config *config)
{
	/* Legacy 0x3F8 I/O UART when the 8250IO console driver is selected. */
	if (CONFIG(DRIVERS_UART_8250IO))
		m_cfg->PcdIsaSerialUartBase = ISA_SERIAL_BASE_ADDR_3F8;

	/* coreboot already set up the console UART; tell FSP to skip init. */
	m_cfg->SerialIoUartDebugControllerNumber = CONFIG_UART_FOR_CONSOLE;
	m_cfg->SerialIoUartDebugMode = PchSerialIoSkipInit;
}
/* Program image processing unit (IPU) UPDs. */
static void fill_fspm_ipu_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	/* Enable the IPU only when its PCI function is enabled in devicetree. */
	m_cfg->SaIpuEnable = is_devfn_enabled(PCI_DEVFN_IPU);
	/* Image clock: disable all clocks so FSP's pin muxing is bypassed. */
	memset(m_cfg->ImguClkOutEn, 0, sizeof(m_cfg->ImguClkOutEn));
}
/* Enable the SMBus controller UPD according to the devicetree. */
static void fill_fspm_smbus_params(FSP_M_CONFIG *m_cfg,
				   const struct soc_intel_pantherlake_config *config)
{
	m_cfg->SmbusEnable = is_devfn_enabled(PCI_DEVFN_SMBUS) ? 1 : 0;
}
/* Miscellaneous UPDs that do not belong to a specific IP block. */
static void fill_fspm_misc_params(FSP_M_CONFIG *m_cfg,
				  const struct soc_intel_pantherlake_config *config)
{
	/* coreboot owns GPIO programming; keep FSP away from the pads. */
	m_cfg->GpioOverride = 1;
	/* Skip the CPU replacement check unless the board opts in. */
	m_cfg->SkipCpuReplacementCheck = !config->cpu_replacement_check;
	/* Publish the MBP HOB only when Kconfig requests it. */
	m_cfg->SkipMbpHob = !CONFIG(FSP_PUBLISH_MBP_HOB);
	m_cfg->SkipExtGfxScan = config->skip_ext_gfx_scan;
	m_cfg->DlvrRfiEnable = 1;
}
/* Program HD-Audio UPDs: controller/DSP enables, iDisp link, SDI links. */
static void fill_fspm_audio_params(FSP_M_CONFIG *m_cfg,
				   const struct soc_intel_pantherlake_config *config)
{
	/* Audio: HDAUDIO_LINK_MODE I2S/SNDW */
	m_cfg->PchHdaEnable = is_devfn_enabled(PCI_DEVFN_HDA);
	m_cfg->PchHdaDspEnable = config->pch_hda_dsp_enable;
	m_cfg->PchHdaIDispLinkTmode = config->pch_hda_idisp_link_tmode;
	m_cfg->PchHdaIDispLinkFrequency = config->pch_hda_idisp_link_frequency;
	/* UPD is a "disconnect" flag, hence the inversion of the enable. */
	m_cfg->PchHdaIDispCodecDisconnect = !config->pch_hda_idisp_codec_enable;
	for (int i = 0; i < MAX_HD_AUDIO_SDI_LINKS; i++)
		m_cfg->PchHdaSdiEnable[i] = !!config->pch_hda_sdi_enable[i];
	/*
	 * All the PchHdaAudioLink{Hda|Dmic|Ssp|Sndw}Enable UPDs are used by FSP
	 * only to configure GPIO pads for audio. Mainboard is expected to
	 * perform all GPIO configuration in coreboot and hence these UPDs are
	 * set to 0 to skip FSP GPIO configuration for audio pads.
	 *
	 * NOTE(review): PchHdaAudioLinkHdaEnable is set to 1 below, which
	 * contradicts the "set to 0" statement above — confirm whether the HDA
	 * link pads are intentionally left to FSP on this platform.
	 */
	m_cfg->PchHdaAudioLinkHdaEnable = 1;
	memset(m_cfg->PchHdaAudioLinkDmicEnable, 0, sizeof(m_cfg->PchHdaAudioLinkDmicEnable));
	memset(m_cfg->PchHdaAudioLinkSspEnable, 0, sizeof(m_cfg->PchHdaAudioLinkSspEnable));
	memset(m_cfg->PchHdaAudioLinkSndwEnable, 0, sizeof(m_cfg->PchHdaAudioLinkSndwEnable));
}
/*
 * Program PCIe clock source usage and CLKREQ mapping for every enabled root
 * port described in @cfg. @en_mask is a bitmask of enabled root ports; in
 * compliance-test mode every clock source is forced free-running instead.
 */
static void pcie_rp_init(FSP_M_CONFIG *m_cfg, uint32_t en_mask,
			 const struct pcie_rp_config *cfg, size_t cfg_count)
{
	/* Bitmask of CLKREQ lines already assigned, to detect conflicts. */
	unsigned int clk_req_mapping = 0;

	for (size_t i = 0; i < cfg_count; i++) {
		if (CONFIG(SOC_INTEL_COMPLIANCE_TEST_MODE)) {
			m_cfg->PcieClkSrcUsage[i] = FSP_CLK_FREE_RUNNING;
			continue;
		}
		/* Skip root ports that are not enabled. */
		if (!(en_mask & BIT(i)))
			continue;
		/* An all-zero entry means the board left this port unconfigured. */
		if (!cfg[i].flags && !cfg[i].clk_src && !cfg[i].clk_req) {
			printk(BIOS_WARNING, "Missing root port clock structure definition\n");
			continue;
		}
		/*
		 * NOTE(review): this duplicate-CLKREQ warning is emitted before
		 * the PCIE_RP_CLK_REQ_UNUSED check below, so it can fire for a
		 * port whose clk_req is not actually used — confirm intent.
		 */
		if (clk_req_mapping & (1 << cfg[i].clk_req))
			printk(BIOS_WARNING, "Found overlapped clkreq assignment on clk req %d\n",
			       cfg[i].clk_req);
		if (!(cfg[i].flags & PCIE_RP_CLK_REQ_UNUSED)) {
			m_cfg->PcieClkSrcClkReq[cfg[i].clk_src] = cfg[i].clk_req;
			clk_req_mapping |= 1 << cfg[i].clk_req;
		}
		/* Tie this clock source to root port i. */
		m_cfg->PcieClkSrcUsage[cfg[i].clk_src] = i;
	}
}
/* Program PCIe root port and clock source UPDs from the devicetree. */
static void fill_fspm_pcie_rp_params(FSP_M_CONFIG *m_cfg,
				     const struct soc_intel_pantherlake_config *config)
{
	const uint8_t max_clock = get_max_pcie_clock();

	/*
	 * Start from a clean slate: every clock source is marked unused unless
	 * the board flags it as free-running or dedicated to LAN, and every
	 * CLKREQ is marked unused until pcie_rp_init() assigns it.
	 */
	for (size_t i = 0; i < max_clock; i++) {
		uint8_t usage = FSP_CLK_NOTUSED;

		if (config->pcie_clk_config_flag[i] & PCIE_CLK_FREE_RUNNING)
			usage = FSP_CLK_FREE_RUNNING;
		else if (config->pcie_clk_config_flag[i] & PCIE_CLK_LAN)
			usage = FSP_CLK_LAN;

		m_cfg->PcieClkSrcUsage[i] = usage;
		m_cfg->PcieClkSrcClkReq[i] = FSP_CLK_NOTUSED;
	}

	/* Enable the root ports from devicetree and map their clocks. */
	m_cfg->PcieRpEnableMask = pcie_rp_enable_mask(get_pcie_rp_table());
	pcie_rp_init(m_cfg, m_cfg->PcieRpEnableMask, config->pcie_rp, get_max_pcie_port());
}
/* Enable the Intel Sensor Hub UPD according to the devicetree. */
static void fill_fspm_ish_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	m_cfg->PchIshEnable = is_devfn_enabled(PCI_DEVFN_ISH) ? 1 : 0;
}
/* Program Type-C subsystem (TCSS) UPDs: xHCI enable and per-port policy. */
static void fill_fspm_tcss_params(FSP_M_CONFIG *m_cfg,
				  const struct soc_intel_pantherlake_config *config)
{
	/* Tcss USB */
	m_cfg->TcssXhciEn = is_devfn_enabled(PCI_DEVFN_TCSS_XHCI);
	/* Enable TCSS port: capability policy per port from the devicetree. */
	m_cfg->TcssPort0 = config->tcss_cap_policy[0];
	m_cfg->TcssPort1 = config->tcss_cap_policy[1];
	m_cfg->TcssPort2 = config->tcss_cap_policy[2];
	m_cfg->TcssPort3 = config->tcss_cap_policy[3];
}
/* Program VT-d UPDs: keep VT-d on, publish BARs, follow ENABLE_VMX Kconfig. */
static void fill_fspm_vtd_params(FSP_M_CONFIG *m_cfg,
				 const struct soc_intel_pantherlake_config *config)
{
	/* VT-d stays enabled; publish the fixed VT-d engine base addresses. */
	m_cfg->VtdDisable = 0;
	m_cfg->VtdBaseAddress[0] = GFXVT_BASE_ADDRESS;
	m_cfg->VtdBaseAddress[1] = VTVC0_BASE_ADDRESS;
	m_cfg->VtdBaseAddress[2] = IOCVTD_BASE_ADDRESS;

	/* VMX enablement follows the ENABLE_VMX Kconfig. */
	m_cfg->VmxEnable = CONFIG(ENABLE_VMX) ? 1 : 0;
}
/* Program Trace Hub / debug UPDs; no-op unless the Trace Hub block is built. */
static void fill_fspm_trace_params(FSP_M_CONFIG *m_cfg,
				   const struct soc_intel_pantherlake_config *config)
{
	if (!CONFIG(SOC_INTEL_COMMON_BLOCK_TRACEHUB))
		return;

	m_cfg->DciEn = 1;
	m_cfg->PlatformDebugOption = CONFIG_SOC_INTEL_COMMON_DEBUG_CONSENT;
	m_cfg->CpuCrashLogEnable = CONFIG(SOC_INTEL_CRASHLOG) ? 1 : 0;
}
/*
 * Program thermal UPDs. Fix: the original assigned TccActivationOffset
 * twice in a row; the redundant duplicate assignment is removed.
 */
static void fill_fspm_thermal_params(FSP_M_CONFIG *m_cfg,
				     const struct soc_intel_pantherlake_config *config)
{
	/* Thermal Control Circuit activation offset from the devicetree. */
	m_cfg->TccActivationOffset = config->tcc_offset;
}
/* Voltage-regulator UPDs: keep fast Vmode disabled on every VR domain. */
static void fill_fspm_vr_config_params(FSP_M_CONFIG *m_cfg,
				       const struct soc_intel_pantherlake_config *config)
{
	memset(m_cfg->EnableFastVmode, 0, sizeof(m_cfg->EnableFastVmode));
}
/* Run every per-IP FSP-M UPD filler in turn. */
static void soc_memory_init_params(FSP_M_CONFIG *m_cfg,
				   const struct soc_intel_pantherlake_config *config)
{
	void (*const fillers[])(FSP_M_CONFIG *m_cfg,
				const struct soc_intel_pantherlake_config *config) = {
		fill_fspm_igd_params,
		fill_fspm_mrc_params,
		fill_fspm_cpu_params,
		fill_fspm_security_params,
		fill_fspm_uart_params,
		fill_fspm_ipu_params,
		fill_fspm_smbus_params,
		fill_fspm_misc_params,
		fill_fspm_audio_params,
		fill_fspm_pcie_rp_params,
		fill_fspm_ish_params,
		fill_fspm_tcss_params,
		fill_fspm_vtd_params,
		fill_fspm_trace_params,
		fill_fspm_thermal_params,
		fill_fspm_vr_config_params,
	};

	for (size_t idx = 0; idx < ARRAY_SIZE(fillers); idx++)
		fillers[idx](m_cfg, config);
}
/*
 * Hook coreboot's debug event handler into FSP and align the FSP/MRC serial
 * debug levels with the coreboot console, or mute them when serial output
 * is disabled. No-op unless FSP_USES_CB_DEBUG_EVENT_HANDLER is selected.
 */
static void fill_fsp_event_handler(FSPM_ARCH2_UPD *arch_upd, FSP_M_CONFIG *m_cfg)
{
	if (!CONFIG(FSP_USES_CB_DEBUG_EVENT_HANDLER))
		return;

	if (!CONFIG(CONSOLE_SERIAL) || !CONFIG(FSP_ENABLE_SERIAL_DEBUG)) {
		/* Mute both the generic FSP and the MRC serial output. */
		m_cfg->PcdSerialDebugLevel = 0;
		m_cfg->SerialDebugMrcLevel = 0;
		return;
	}

	/* Route FSP debug events to coreboot's handler. */
	arch_upd->FspEventHandler =
		(uintptr_t)((FSP_EVENT_HANDLER *)fsp_debug_event_handler);

	/* Match FSP and MRC verbosity to the coreboot console log level. */
	const enum fsp_log_level level = fsp_map_console_log_level();

	m_cfg->PcdSerialDebugLevel = level;
	m_cfg->SerialDebugMrcLevel = level;
}
/*
 * FSP driver callback invoked before FSP MemoryInit: program the debug event
 * handler, fill all SoC FSP-M UPDs, then give the mainboard the last word.
 */
void platform_fsp_memory_init_params_cb(FSPM_UPD *mupd, uint32_t version)
{
	/* TODO: Placeholder for overriding FSP-M UPDs */
	const struct soc_intel_pantherlake_config *config = config_of_soc();
	FSP_M_CONFIG *m_cfg = &mupd->FspmConfig;
	FSPM_ARCH2_UPD *arch_upd = &mupd->FspmArchUpd;

	fill_fsp_event_handler(arch_upd, m_cfg);
	soc_memory_init_params(m_cfg, config);
	/* Mainboard override hook runs last so it can adjust any UPD. */
	mainboard_memory_init_params(mupd);
}
__weak void mainboard_memory_init_params(FSPM_UPD *memupd)