aarch64/foundation-armv8: Basic bootblock implementation

This adds simple bootblock initialization procedures with console
output support for the Foundation ARMv8 model. It includes stack
setup, basic cache/TLB/MMU control routines, SCTLR register access
for different exception levels, and memset.S code from ARM.

It runs on the Foundation_v8 fast model from arm (see command line
in src/mainboard/emulation/foundation-armv8/Kconfig) until loading
romstage at cbfs_load_stage(), where it currently halts. Code is
debuggable using printk only (after console_init) because the
Foundation model does not provide bare metal debugging support (so
adding exception handling and stack/register dump might be useful).

BUG=None
BRANCH=none
TEST=Ran image in foundation model
Signed-off-by: Marcelo Povoa <marcelogp@chromium.org>

Change-Id: I81b12416424a02f24a80924791fc39d9411fa4b4
Reviewed-on: https://chromium-review.googlesource.com/185275
Reviewed-by: David Hendricks <dhendrix@chromium.org>
Tested-by: Marcelo Póvoa <marcelogp@chromium.org>
Commit-Queue: Marcelo Póvoa <marcelogp@chromium.org>
This commit is contained in:
Marcelo Povoa 2014-02-06 15:17:49 -08:00 committed by chrome-internal-fetch
commit c732a9d6a5
13 changed files with 862 additions and 62 deletions

View file

@ -3,6 +3,7 @@ menu "Architecture (aarch64)"
config ARM_AARCH64_OPTIONS
bool
default y
select BOOTBLOCK_CONSOLE
# select HAVE_ARCH_MEMSET
# select HAVE_ARCH_MEMCPY
# select HAVE_ARCH_MEMMOVE

View file

@ -104,14 +104,6 @@ $(objcbfs)/%.elf: $(objcbfs)/%.debug
$(OBJCOPY) --add-gnu-debuglink=$< $@.tmp
mv $@.tmp $@
stages_c = $(src)/arch/aarch64/stages.c
stages_o = $(obj)/arch/aarch64/stages.o
$(stages_o): $(stages_c) $(obj)/config.h
@printf " CC $(subst $(obj)/,,$(@))\n"
$(CC) -I. $(INCLUDES) -c -o $@ $<
################################################################################
# Build the coreboot_ram (stage 2)
@ -149,16 +141,23 @@ $(obj)/mainboard/$(MAINBOARDDIR)/romstage.pre.inc: $(src)/mainboard/$(MAINBOARDD
ramstage-y += exception.c
#ramstage-y += exception_asm.S
#bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += early_console.c
#bootblock-y += cache.c
bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += early_console.c
bootblock-y += cache.c
bootblock-y += cpu.S
#romstage-y += cache.c
romstage-y += cache.c
romstage-y += cpu.S
#romstage-y += div0.c
#romstage-$(CONFIG_EARLY_CONSOLE) += early_console.c
romstage-$(CONFIG_EARLY_CONSOLE) += early_console.c
bootblock-y += stages.c
romstage-y += stages.c
ramstage-y += stages.c
#ramstage-y += div0.c
#ramstage-y += interrupts.c
#ramstage-y += cache.c
ramstage-y += cache.c
ramstage-y += cpu.S
#ramstage-y += mmu.c
#romstage-y += eabi_compat.c

View file

@ -38,7 +38,37 @@ reset:
* before logging is turned on and may crash the machine, but at least
* the problem will show up near the code that causes it.
*/
b reset
/* FIXME: Not using supervisor mode, does it apply for aarch64? */
msr daifclr, #0xc /* Unmask Debug and System exceptions */
msr daifset, #0x3 /* Mask IRQ, FIQ */
bl arm_init_caches
/*
* Initialize the stack to a known value. This is used to check for
* stack overflow later in the boot process.
*/
ldr x0, .Stack
ldr x1, .Stack_size
sub x0, x0, x1
ldr x1, .Stack
ldr x2, =0xdeadbeefdeadbeef
init_stack_loop:
str x2, [x0]
add x0, x0, #8
cmp x0, x1
bne init_stack_loop
/* Set stackpointer in internal RAM to call bootblock main() */
call_bootblock:
ldr x0, .Stack /* Set up stack pointer */
mov sp, x0
ldr x0, =0x00000000
sub sp, sp, #16
bl main
.align 3
.Stack:
@ -59,7 +89,7 @@ part:
.long __id_end - ver /* Reverse offset to the vendor id */
.long __id_end - vendor /* Reverse offset to the vendor id */
.long __id_end - part /* Reverse offset to the part number */
.long CONFIG_ROM_SIZE /* Size of this romimage */
.long CONFIG_ROM_SIZE /* Size of this romimage */
.globl __id_end
__id_end:

View file

@ -20,14 +20,12 @@
*/
#include <bootblock_common.h>
//#include <arch/cache.h>
#include <arch/cache.h>
#include <arch/hlt.h>
#include <arch/stages.h>
#include <cbfs.h>
#include <console/console.h>
//#include "stages.c"
static int boot_cpu(void)
{
/*
@ -45,6 +43,7 @@ void main(void)
/* Globally disable MMU, caches, and branch prediction (these should
* be disabled by default on reset) */
dcache_mmu_disable();
/*
* Re-enable icache and branch prediction. MMU and dcache will be
@ -61,15 +60,13 @@ void main(void)
#ifdef CONFIG_BOOTBLOCK_CONSOLE
console_init();
//exception_init();
#endif
#if 0
entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);
#endif
printk(BIOS_SPEW, "stage_name %s, entry %p\n", stage_name, entry);
#if 0
if (entry) stage_exit(entry);
hlt();
#endif
}

148
src/arch/aarch64/cache.c Normal file
View file

@ -0,0 +1,148 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* cache.c: Cache maintenance routines for ARMv8 (aarch64)
*
* Reference: ARM Architecture Reference Manual, ARMv8-A edition
*/
//#include <stdint.h>
#include <arch/cache.h>
/*
 * Invalidate the entire unified TLB at the current exception level.
 * The DSB waits for the invalidation to complete; the ISB flushes the
 * pipeline so later fetches use the new translation state.
 */
void tlb_invalidate_all(void)
{
/* TLBIALL includes dTLB and iTLB on systems that have them. */
tlbiall(current_el());
dsb();
isb();
}
/*
 * Return the data cache line length in bytes, as reported by
 * CCSIDR_EL1 for the currently selected cache. The value is computed
 * on first use and memoized for subsequent calls.
 */
unsigned int dcache_line_bytes(void)
{
	static unsigned int cached_size;

	if (!cached_size) {
		uint32_t ccsidr = read_ccsidr();
		/* CCSIDR[2:0] = log2(words per cache line) - 4 */
		unsigned int words_per_line = 1 << ((ccsidr & 0x7) + 4);
		cached_size = words_per_line * sizeof(unsigned int);
	}
	return cached_size;
}
/* D-cache maintenance operations used to parameterize dcache_op_va(). */
enum dcache_op {
OP_DCCSW,	/* clean by set/way */
OP_DCCISW,	/* clean and invalidate by set/way */
OP_DCISW,	/* invalidate by set/way */
OP_DCCIVAC,	/* clean and invalidate by VA to PoC */
OP_DCCVAC,	/* clean by VA to PoC */
OP_DCIVAC,	/* invalidate by VA to PoC */
};
/*
* Do a dcache operation by virtual address. This is useful for maintaining
* coherency in drivers which do DMA transfers and only need to perform
* cache maintenance on a particular memory range rather than the entire cache.
*/
/*
 * Apply the given dcache maintenance operation to every cache line
 * overlapping [addr, addr + len). The start address is rounded down
 * to a line boundary; set/way operations are ignored here since they
 * do not take a virtual address.
 */
static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
{
	uint64_t line_size = dcache_line_bytes();
	uint64_t line = (uint64_t)addr & ~(line_size - 1);
	uint64_t end = (uint64_t)addr + len;

	dsb();
	for (; line < end; line += line_size) {
		switch (op) {
		case OP_DCCIVAC:
			dccivac(line);
			break;
		case OP_DCCVAC:
			dccvac(line);
			break;
		case OP_DCIVAC:
			dcivac(line);
			break;
		default:
			break;
		}
	}
	isb();
}
/* Clean (write back) a VA range to the point of coherency. */
void dcache_clean_by_va(void const *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCCVAC);
}
/* Clean and invalidate a VA range to the point of coherency. */
void dcache_clean_invalidate_by_va(void const *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCCIVAC);
}
/* Invalidate (discard) a VA range to the point of coherency. */
void dcache_invalidate_by_va(void const *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCIVAC);
}
/*
* CAUTION: This implementation assumes that coreboot never uses non-identity
* page tables for pages containing executed code. If you ever want to violate
* this assumption, have fun figuring out the associated problems on your own.
*/
/*
 * Disable the data cache and MMU at the current exception level.
 * The dcache is flushed first so that no dirty lines are lost once
 * the cache-enable bit is cleared.
 */
void dcache_mmu_disable(void)
{
uint32_t sctlr;
flush_dcache_all();
sctlr = read_sctlr(current_el());
sctlr &= ~(SCTLR_C | SCTLR_M); /* clear dcache enable and MMU enable */
write_sctlr(sctlr, current_el());
}
/* Enable the data cache and MMU at the current exception level. */
void dcache_mmu_enable(void)
{
	uint32_t el = current_el();

	write_sctlr(read_sctlr(el) | SCTLR_C | SCTLR_M, el);
}
/*
 * Make instruction fetches coherent with freshly written code: push
 * all dirty data out of the dcache, then invalidate the icache (and,
 * architecturally, the branch predictor) and resync the pipeline.
 */
void cache_sync_instructions(void)
{
flush_dcache_all(); /* includes trailing DSB (in assembly) */
iciallu(); /* includes BPIALLU (architecturally) */
dsb();
isb();
}

108
src/arch/aarch64/cpu.S Normal file
View file

@ -0,0 +1,108 @@
/*
* Based on arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King.
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <arch/asm.h>
/*
* flush_dcache_all()
*
* Flush the whole D-cache.
*
* Corrupted registers: x0-x7, x9-x11
*/
ENTRY(flush_dcache_all)
	dsb	sy				// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1			// read clidr
	and	x3, x0, #0x7000000		// extract loc from clidr
	lsr	x3, x3, #23			// left align loc bit field
	cbz	x3, finished			// if loc is 0, then no need to clean
	mov	x10, #0				// start clean at cache level 0
loop1:						// iterate over cache levels up to LoC
	add	x2, x10, x10, lsr #1		// work out 3x current cache level
	lsr	x1, x0, x2			// extract cache type bits from clidr
	and	x1, x1, #7			// mask of the bits for current cache only
	cmp	x1, #2				// see what cache we have at this level
	b.lt	skip				// skip if no cache, or just i-cache
	mrs	x9, daif			// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10			// select current cache level in csselr
	isb					// isb to sych the new cssr&csidr
	mrs	x1, ccsidr_el1			// read the new ccsidr
	msr	daif, x9			// restore interrupt mask state
	and	x2, x1, #7			// extract the length of the cache lines
	add	x2, x2, #4			// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3		// find maximum number on the way size
	clz	x5, x4				// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13		// extract max number of the index size
loop2:						// iterate over the sets (indices)
	mov	x9, x4				// create working copy of max way size
loop3:						// iterate over the ways
	lsl	x6, x9, x5			// shift way number into its field
	orr	x11, x10, x6			// factor way and cache number into x11
	lsl	x6, x7, x2			// shift set number into its field
	orr	x11, x11, x6			// factor index number into x11
	dc	cisw, x11			// clean & invalidate by set/way
	subs	x9, x9, #1			// decrement the way
	b.ge	loop3				// next way
	subs	x7, x7, #1			// decrement the index
	b.ge	loop2				// next set
skip:
	add	x10, x10, #2			// increment cache number
	cmp	x3, x10
	b.gt	loop1				// next cache level, if below LoC
finished:
	mov	x10, #0				// swith back to cache level 0
	msr	csselr_el1, x10			// select current cache level in csselr
	dsb	sy				// wait for all maintenance to complete
	isb
	ret
ENDPROC(flush_dcache_all)
/*
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
* known state regarding caches/SCTLR. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
* accesses, icache and branch prediction (if inactive). Clobbers x4 and x5.
*/
ENTRY(arm_init_caches)
	/* w4: SCTLR, return address: x8 (stay valid for the whole function) */
	mov	x8, x30				// bl below clobbers x30 (LR); save it
	/* XXX: Assume that we always start running at EL3 */
	mrs	x4, sctlr_el3
	/* FIXME: How to enable branch prediction on ARMv8? */
	msr	sctlr_el3, x4
	/* Flush and invalidate dcache */
	bl	flush_dcache_all
	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
	and	x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
	msr	sctlr_el3, x4
	/* Invalidate icache and TLB for good measure */
	ic	iallu
	tlbi	alle3				// EL3 TLB only, per the EL3 assumption above
	dsb	sy
	isb
	ret	x8				// return via the saved link register
ENDPROC(arm_init_caches)

View file

@ -0,0 +1,75 @@
/*
* This file is part of the coreboot project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include <console/console.h>
#include <console/vtxprintf.h>
/* FIXME: need to make console driver more generic */
/*
 * Emit one byte on every console transport compiled into this stage.
 * A '\n' is expanded to "\r\n" by a recursive call before the byte
 * itself is sent.
 */
void console_tx_byte(unsigned char byte)
{
if (byte == '\n')
console_tx_byte('\r');
#if CONFIG_CONSOLE_SERIAL8250MEM
/* Memory-mapped 8250 UART on an Oxford OXPCIe card, if present. */
if (oxford_oxpcie_present) {
uart8250_mem_tx_byte(
CONFIG_OXFORD_OXPCIE_BASE_ADDRESS + 0x1000, byte);
}
#endif
#if CONFIG_CONSOLE_SERIAL_UART
uart_tx_byte(byte);
#endif
#if CONFIG_USBDEBUG
usbdebug_tx_byte(0, byte);
#endif
#if CONFIG_CONSOLE_CBMEM && !defined(__BOOT_BLOCK__)
/* CBMEM console is unavailable in the bootblock (no CBMEM yet). */
cbmemc_tx_byte(byte);
#endif
}
/* Drain buffered output on every console transport compiled into this stage. */
void console_tx_flush(void)
{
#if CONFIG_CONSOLE_SERIAL8250MEM
uart8250_mem_tx_flush(CONFIG_OXFORD_OXPCIE_BASE_ADDRESS + 0x1000);
#endif
#if CONFIG_CONSOLE_SERIAL_UART
uart_tx_flush();
#endif
#if CONFIG_USBDEBUG
usbdebug_tx_flush(0);
#endif
}
/*
 * Core printk worker: format `fmt` to the console when `msg_level` is
 * at or below the current console log level.
 * Returns the number of characters emitted (0 when filtered out).
 */
int do_printk(int msg_level, const char *fmt, ...)
{
	va_list ap;
	int written = 0;

	if (msg_level <= console_loglevel) {
		va_start(ap, fmt);
		written = vtxprintf(console_tx_byte, fmt, ap);
		va_end(ap);
		console_tx_flush();
	}
	return written;
}

View file

@ -27,13 +27,14 @@
* SUCH DAMAGE.
*/
#include <console/console.h>
#include <arch/exception.h>
#include <stdint.h>
#include <types.h>
#include <arch/cache.h>
#include <arch/exception.h>
#include <console/console.h>
void exception_test(void);
static int test_abort;
uint8_t exception_stack[0x100] __attribute__((aligned(8)));
extern void *exception_stack_end;
void exception_undefined_instruction(uint32_t *);
void exception_software_interrupt(uint32_t *);
@ -46,14 +47,14 @@ void exception_fiq(uint32_t *);
static void print_regs(uint32_t *regs)
{
int i;
// XXX
for (i = 0; i < 16; i++) {
if (i == 15)
printk(BIOS_ERR, "PC");
else if (i == 14)
continue; /* LR */
printk(BIOS_ERR, "LR");
else if (i == 13)
continue; /* SP */
printk(BIOS_ERR, "SP");
else if (i == 12)
printk(BIOS_ERR, "IP");
else
@ -85,13 +86,8 @@ void exception_prefetch_abort(uint32_t *regs)
void exception_data_abort(uint32_t *regs)
{
if (test_abort) {
regs[15] = regs[0];
return;
} else {
printk(BIOS_ERR, "exception _data_abort\n");
print_regs(regs);
}
printk(BIOS_ERR, "exception _data_abort\n");
print_regs(regs);
die("exception");
}
@ -104,27 +100,31 @@ void exception_not_used(uint32_t *regs)
void exception_irq(uint32_t *regs)
{
printk(BIOS_ERR, "exception _irq\n");
print_regs(regs);
die("exception");
}
void exception_fiq(uint32_t *regs)
{
}
static inline uint32_t get_sctlr(void)
{
return 0;
}
static inline void set_sctlr(uint32_t val)
{
printk(BIOS_ERR, "exception _fiq\n");
print_regs(regs);
die("exception");
}
void exception_init(void)
{
test_abort = 1;
printk(BIOS_ERR, "Testing exceptions\n");
//exception_test();
//test_abort = 0;
printk(BIOS_ERR, "Testing exceptions: DONE\n");
uint32_t sctlr = read_sctlr();
/* Handle exceptions in ARM mode. */
//sctlr &= ~SCTLR_TE;
/* Set V=0 in SCTLR so VBAR points to the exception vector table. */
//sctlr &= ~SCTLR_V;
/* Enforce alignment temporarily. */
write_sctlr(sctlr);
//extern uint32_t exception_table[];
//set_vbar((uintptr_t)exception_table);
//exception_stack_end = exception_stack + sizeof(exception_stack);
printk(BIOS_DEBUG, "Exception handlers installed.\n");
}

View file

@ -0,0 +1,38 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2013 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ARM_AARCH64_ASM_H
#define __ARM_AARCH64_ASM_H
/* Default alignment applied by ENTRY(): none (.align 0 = 2^0 bytes). */
#define ALIGN .align 0
/* Close a function opened with ENTRY(): tag it as a function symbol
 * and record its size via END(). */
#define ENDPROC(name) \
.type name, %function; \
END(name)
/* Open a global assembly function in its own .text.name section. */
#define ENTRY(name) \
.section .text.name, "ax", %progbits; \
.global name; \
ALIGN; \
name:
/* Record the symbol size so tools (objdump, gdb) see the full body. */
#define END(name) \
.size name, .-name
#endif /* __ARM_AARCH64_ASM_H */

View file

@ -0,0 +1,335 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* cache.h: Cache maintenance API for AARCH64
*/
#ifndef ARM_AARCH64_CACHE_H
#define ARM_AARCH64_CACHE_H
#include <config.h>
#include <stddef.h>
#include <stdint.h>
/* SCTLR_ELx common bits */
#define SCTLR_M (1 << 0) /* MMU enable */
#define SCTLR_A (1 << 1) /* Alignment check enable */
#define SCTLR_C (1 << 2) /* Data/unified cache enable */
#define SCTLR_SA (1 << 3) /* Stack alignment check enable */
#define SCTLR_I (1 << 12) /* Instruction cache enable */
#define SCTLR_WXN (1 << 19) /* Write permission implies XN */
#define SCTLR_EE (1 << 25) /* Exception endianness */
/* SCTLR_EL1 bits */
#define SCTLR_EL1_CP15B (1 << 5) /* CP15 barrier enable */
#define SCTLR_EL1_ITD (1 << 7) /* IT disable */
#define SCTLR_EL1_SED (1 << 8) /* SETEND disable */
#define SCTLR_EL1_UMA (1 << 9) /* User mask access */
#define SCTLR_EL1_DZE (1 << 14) /* DC ZVA instruction at EL0 */
#define SCTLR_EL1_UCT (1 << 15) /* CTR_EL0 register EL0 access */
#define SCTLR_EL1_NTWI (1 << 16) /* Not trap WFI */
#define SCTLR_EL1_NTWE (1 << 18) /* Not trap WFE */
#define SCTLR_EL1_E0E (1 << 24) /* Exception endianness at EL0 */
#define SCTLR_EL1_UCI (1 << 26) /* EL0 access to cache instructions */
/*
 * Utility macro to choose an instruction according to the exception
 * level (EL) passed, which number is concatenated between insa and insb parts.
 * Example: SWITCH_EL("mrs %0, sctlr_el", : "=r" (val), el) selects
 * sctlr_el1/sctlr_el2/sctlr_el3 based on the run-time value of el.
 */
#define SWITCH_EL(insa, insb, el) if (el == 1) asm volatile(insa "1" insb); \
else if (el == 2) asm volatile (insa "2" insb); \
else asm volatile (insa "3" insb)
/* get current exception level (EL1-EL3) */
static inline uint32_t current_el(void)
{
uint32_t el;
/* CurrentEL[3:2] holds the exception level; low bits are RES0. */
asm volatile ("mrs %0, CurrentEL" : "=r" (el));
return el >> 2;
}
/*
 * Sync primitives (all use full-system "sy" scope)
 */
/* data memory barrier: orders memory accesses before/after the barrier */
static inline void dmb(void)
{
asm volatile ("dmb sy" : : : "memory");
}
/* data sync barrier: completes all outstanding memory accesses */
static inline void dsb(void)
{
asm volatile ("dsb sy" : : : "memory");
}
/* instruction sync barrier: flushes the pipeline/prefetch buffer */
static inline void isb(void)
{
asm volatile ("isb sy" : : : "memory");
}
/*
 * Low-level TLB maintenance operations
 */
/* invalidate entire unified TLB at the given exception level */
static inline void tlbiall(uint32_t el)
{
SWITCH_EL("tlbi alle", : : : "memory", el);
}
/* invalidate unified TLB by VA, all ASID (EL1) */
static inline void tlbivaa(uint64_t va)
{
asm volatile("tlbi vaae1, %0" : : "r" (va) : "memory");
}
/* write translation table base register 0 (TTBR0_ELx) */
static inline void write_ttbr0(uint64_t val, uint32_t el)
{
SWITCH_EL("msr ttbr0_el", ", %0" : : "r" (val) : "memory", el);
}
/* read translation control register (TCR_ELx) */
static inline uint64_t read_tcr(uint32_t el)
{
uint64_t val = 0;
SWITCH_EL("mrs %0, tcr_el", : "=r" (val), el);
return val;
}
/* write translation control register (TCR_ELx) */
static inline void write_tcr(uint64_t val, uint32_t el)
{
SWITCH_EL("msr tcr_el", ", %0" : : "r" (val) : "memory", el);
}
/*
 * Low-level cache maintenance operations.
 * The *vac variants take a virtual address; the *sw variants take a
 * set/way/level encoding (see the ARMv8 ARM, DC instructions).
 */
/* data cache clean and invalidate by VA to PoC */
static inline void dccivac(uint64_t va)
{
asm volatile ("dc civac, %0" : : "r" (va) : "memory");
}
/* data cache clean and invalidate by set/way */
static inline void dccisw(uint64_t val)
{
asm volatile ("dc cisw, %0" : : "r" (val) : "memory");
}
/* data cache clean by VA to PoC */
static inline void dccvac(uint64_t va)
{
asm volatile ("dc cvac, %0" : : "r" (va) : "memory");
}
/* data cache clean by set/way */
static inline void dccsw(uint64_t val)
{
asm volatile ("dc csw, %0" : : "r" (val) : "memory");
}
/* data cache invalidate by VA to PoC */
static inline void dcivac(uint64_t va)
{
asm volatile ("dc ivac, %0" : : "r" (va) : "memory");
}
/* data cache invalidate by set/way */
static inline void dcisw(uint64_t val)
{
asm volatile ("dc isw, %0" : : "r" (val) : "memory");
}
/* instruction cache invalidate all (to PoU) */
static inline void iciallu(void)
{
asm volatile ("ic iallu" : : : "memory");
}
/*
 * Cache identification register accessors
 */
/* read cache level ID register (CLIDR_EL1) */
static inline uint32_t read_clidr(void)
{
uint32_t val = 0;
asm volatile ("mrs %0, clidr_el1" : "=r" (val));
return val;
}
/* read cache size ID register (CCSIDR_EL1) for the cache selected
 * via CSSELR_EL1 */
static inline uint32_t read_ccsidr(void)
{
uint32_t val = 0;
asm volatile ("mrs %0, ccsidr_el1" : "=r" (val));
return val;
}
/* read cache size selection register (CSSELR_EL1) */
static inline uint32_t read_csselr(void)
{
uint32_t val = 0;
asm volatile ("mrs %0, csselr_el1" : "=r" (val));
return val;
}
/* write to cache size selection register (CSSELR_EL1) */
static inline void write_csselr(uint32_t val)
{
/*
 * Bits [3:1] - Cache level + 1 (0b000 = L1, 0b110 = L7, 0b111 is rsvd)
 * Bit 0 - 0 = data or unified cache, 1 = instruction cache
 */
asm volatile ("msr csselr_el1, %0" : : "r" (val));
isb(); /* ISB to sync the change to CCSIDR_EL1 */
}
#if 0
/* read L2 control register (L2CTLR) */
static inline uint32_t read_l2ctlr(void)
{
uint32_t val = 0;
asm volatile ("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
return val;
}
/* write L2 control register (L2CTLR) */
static inline void write_l2ctlr(uint32_t val)
{
/*
* Note: L2CTLR can only be written when the L2 memory system
* is idle, ie before the MMU is enabled.
*/
asm volatile("mcr p15, 1, %0, c9, c0, 2" : : "r" (val) : "memory" );
isb();
}
/* read L2 Auxiliary Control Register (L2ACTLR) */
static inline uint32_t read_l2actlr(void)
{
uint32_t val = 0;
asm volatile ("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
return val;
}
/* write L2 Auxiliary Control Register (L2ACTLR) */
static inline void write_l2actlr(uint32_t val)
{
asm volatile ("mcr p15, 1, %0, c15, c0, 0" : : "r" (val) : "memory" );
isb();
}
#endif
/* read system control register (SCTLR_ELx) at the given exception level */
static inline uint32_t read_sctlr(uint32_t el)
{
uint32_t val;
SWITCH_EL("mrs %0, sctlr_el", : "=r" (val), el);
return val;
}
/* write system control register (SCTLR_ELx); the trailing ISB makes
 * the new control bits (MMU/cache enables etc.) take effect before
 * any subsequent instruction */
static inline void write_sctlr(uint32_t val, uint32_t el)
{
SWITCH_EL("msr sctlr_el", ", %0" : : "r" (val) : "cc", el);
isb();
}
/*
* Cache maintenance API
*/
#if 0
/* dcache clean and invalidate all (on current level given by CCSELR) */
void dcache_clean_invalidate_all(void);
void dcache_clean_all(void);
#endif
/* dcache clean by virtual address to PoC */
void dcache_clean_by_va(void const *addr, size_t len);
/* dcache clean and invalidate by virtual address to PoC */
void dcache_clean_invalidate_by_va(void const *addr, size_t len);
/* dcache invalidate by virtual address to PoC */
void dcache_invalidate_by_va(void const *addr, size_t len);
/* dcache invalidate all */
void flush_dcache_all(void);
/* returns number of bytes per cache line */
unsigned int dcache_line_bytes(void);
/* dcache and MMU disable */
void dcache_mmu_disable(void);
/* dcache and MMU enable */
void dcache_mmu_enable(void);
/* perform all icache/dcache maintenance needed after loading new code */
void cache_sync_instructions(void);
/* tlb invalidate all */
void tlb_invalidate_all(void);
/*
* Generalized setup/init functions
*/
#if 0
/* mmu initialization (set page table address, set permissions, etc) */
void mmu_init(void);
enum dcache_policy {
DCACHE_OFF,
DCACHE_WRITEBACK,
DCACHE_WRITETHROUGH,
};
/* disable the mmu for a range. Primarily useful to lock out address 0. */
void mmu_disable_range(unsigned long start_mb, unsigned long size_mb);
/* mmu range configuration (set dcache policy) */
void mmu_config_range(unsigned long start_mb, unsigned long size_mb,
enum dcache_policy policy);
#endif
#endif /* ARM_AARCH64_CACHE_H */

52
src/arch/aarch64/memset.S Normal file
View file

@ -0,0 +1,52 @@
/*
* Copyright (C) 2013 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <arch/asm.h>
/*
* Fill in the buffer with character c (alignment handled by the hardware)
*
* Parameters:
* x0 - buf
* x1 - c
* x2 - n
* Returns:
* x0 - buf
*/
/*
 * Store-width cascade: 8-byte stores while at least 8 bytes remain,
 * then at most one 4-, 2- and 1-byte store for the tail. x2 tracks
 * (bytes remaining - current store width), so the sign flag from each
 * adds/subs decides whether the next store is needed.
 */
ENTRY(memset)
	mov	x4, x0			/* keep x0 intact: it is the return value (buf) */
	and	w1, w1, #0xff		/* isolate the fill byte */
	orr	w1, w1, w1, lsl #8	/* replicate byte to 16 bits */
	orr	w1, w1, w1, lsl #16	/* ... to 32 bits */
	orr	x1, x1, x1, lsl #32	/* ... to all 64 bits */
	subs	x2, x2, #8
	b.mi	2f			/* fewer than 8 bytes left */
1:	str	x1, [x4], #8
	subs	x2, x2, #8
	b.pl	1b			/* loop while >= 8 bytes remain */
2:	adds	x2, x2, #4
	b.mi	3f			/* fewer than 4 bytes left */
	sub	x2, x2, #4
	str	w1, [x4], #4
3:	adds	x2, x2, #2
	b.mi	4f			/* fewer than 2 bytes left */
	sub	x2, x2, #2
	strh	w1, [x4], #2
4:	adds	x2, x2, #1
	b.mi	5f			/* nothing left */
	strb	w1, [x4]
5:	ret
ENDPROC(memset)

View file

@ -34,6 +34,7 @@
*/
#include <arch/stages.h>
#include <arch/cache.h>
void stage_entry(void)
{
@ -52,10 +53,6 @@ void stage_exit(void *addr)
* unified caches.
*/
/* Because most stages copy code to memory, it's a safe and
* hygienic thing to flush the icache here. If we knew how
* on this architecture :-)
*/
//icache_invalidate_all();
cache_sync_instructions();
doit();
}

View file

@ -12,6 +12,10 @@
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# To execute, do:
# ./Foundation_v8 --cores=1 --no-secure-memory --visualization \
# --gicv3 --data=build/coreboot.rom@0x0
if BOARD_EMULATION_FOUNDATION_ARMV8
config BOARD_SPECIFIC_OPTIONS # dummy
@ -46,21 +50,29 @@ config DRAM_SIZE_MB
int
default 1024
# Memory map
# Memory map for Foundation v2 model
# Reference: ARM v8-A Foundation Model User Guide
#
# 0xA000_0000: first instruction (called from BL3)
# 0x00_0000_0000 - 0x00_03FF_FFFF Trusted Boot ROM
# 0x00_0400_0000 - 0x00_0403_FFFF Trusted SRAM
# 0x00_0600_0000 - 0x00_07FF_FFFF Trusted DRAM
# 0x00_1C01_0000 - 0x00_1C01_FFFF System Registers
# 0x00_1C09_0000 - 0x00_1C09_FFFF UART0 (PL011)
# 0x00_2E00_0000 - 0x00_2E00_FFFF Non-trusted SRAM
# 0x00_8000_0000 - 0x00_FFFF_FFFF DRAM (0GB - 2GB)
# 0x08_8000_0000 - 0x09_FFFF_FFFF DRAM (2GB - 8GB)
config BOOTBLOCK_BASE
hex
default 0xA0000000
default 0x0
config ID_SECTION_BASE
hex
default 0xA001f000
default 0x1f000
config ROMSTAGE_BASE
hex
default 0xA0020000
default 0x20000
config ROMSTAGE_SIZE
hex
@ -103,4 +115,12 @@ config RAMTOP
hex
default 0x01100000
config STACK_TOP
hex
default 0x8000ff00
config STACK_SIZE
hex
default 0x00001000
endif # BOARD_EMULATION_FOUNDATION_ARMV8