kern: update initial cache management to match latest kernel

Michael Scire 2021-07-12 18:30:01 -07:00
parent 12bf9612cb
commit 14d458522d
4 changed files with 67 additions and 71 deletions


@@ -172,10 +172,8 @@ namespace ams::kern::arch::arm64::cpu {
/* Cache management helpers. */
void ClearPageToZeroImpl(void *);
void FlushEntireDataCacheSharedForInit();
void FlushEntireDataCacheLocalForInit();
void InvalidateEntireInstructionCacheForInit();
void StoreEntireCacheForInit();
void FlushEntireCacheForInit();
void FlushEntireDataCache();
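
In the declarations above, "store" means a clean by set/way and "flush" a clean plus invalidate; the two surviving entry points bundle the whole init-time sequence that the removed per-step helpers used to expose. A minimal sketch of the two underlying line operations, for orientation (the store variant matches the dc csw visible in the next hunk; the flush counterpart, dc cisw, is standard ARMv8 and is assumed here rather than shown in this diff):

#include <cstdint>

/* Sketch: the two set/way line operations behind "store" and "flush".
   sw_value packs level, set, and way as the DC *SW instructions expect. */
inline void StoreDataCacheLineBySetWaySketch(uint64_t sw_value) {
    __asm__ __volatile__("dc csw, %[v]" :: [v]"r"(sw_value) : "memory");   /* clean */
}

inline void FlushDataCacheLineBySetWaySketch(uint64_t sw_value) {
    __asm__ __volatile__("dc cisw, %[v]" :: [v]"r"(sw_value) : "memory");  /* clean and invalidate */
}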


@@ -262,27 +262,6 @@ namespace ams::kern::arch::arm64::cpu {
__asm__ __volatile__("dc csw, %[v]" :: [v]"r"(sw_value) : "memory");
}
template<bool Init, typename F>
ALWAYS_INLINE void PerformCacheOperationBySetWayShared(F f) {
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
for (int level = levels_of_coherency; level >= levels_of_unification; level--) {
PerformCacheOperationBySetWayImpl<Init>(level, f);
}
}
template<bool Init, typename F>
ALWAYS_INLINE void PerformCacheOperationBySetWayLocal(F f) {
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
for (int level = levels_of_unification - 1; level >= 0; level--) {
PerformCacheOperationBySetWayImpl<Init>(level, f);
}
}
void StoreDataCacheBySetWay(int level) {
PerformCacheOperationBySetWayImpl<false>(level, StoreDataCacheLineBySetWayImpl);
cpu::DataSynchronizationBarrier();
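
Both removed helpers above, and the inline loops that replace them in the next hunk, take their level bounds from CLIDR_EL1 through CacheLineIdRegisterAccessor. As a reference for those bounds, here is an illustrative accessor sketch; the names are chosen for this example rather than taken from the kernel, and the field positions match the ubfx extractions in the assembly hunks further down (LoUIS at bits [23:21], LoC at bits [26:24]):

#include <cstdint>

/* Illustrative CLIDR_EL1 accessor: reads the register once and exposes the two
   level fields that bound the loops in this file. */
struct ClidrEl1Sketch {
    uint64_t value;

    ClidrEl1Sketch() {
        __asm__ __volatile__("mrs %[v], clidr_el1" : [v]"=r"(value));
    }

    /* LoUIS, bits [23:21]: level of unification for the Inner Shareable domain. */
    int GetLevelsOfUnification() const { return static_cast<int>((value >> 21) & 0x7); }

    /* LoC, bits [26:24]: level of coherency. */
    int GetLevelsOfCoherency() const { return static_cast<int>((value >> 24) & 0x7); }
};
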
@@ -361,24 +340,63 @@ namespace ams::kern::arch::arm64::cpu {
}
void FlushEntireDataCacheSharedForInit() {
return PerformCacheOperationBySetWayShared<true>(FlushDataCacheLineBySetWayImpl);
void StoreEntireCacheForInit() {
/* Store local. */
{
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
for (int level = 0; level != levels_of_unification; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
}
}
void FlushEntireDataCacheLocalForInit() {
return PerformCacheOperationBySetWayLocal<true>(FlushDataCacheLineBySetWayImpl);
/* Store shared. */
{
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
for (int level = levels_of_unification; level <= levels_of_coherency; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
}
}
void InvalidateEntireInstructionCacheForInit() {
/* Data synchronization barrier. */
DataSynchronizationBarrierInnerShareable();
/* Invalidate instruction cache. */
InvalidateEntireInstructionCacheLocalImpl();
/* Ensure local instruction consistency. */
DataSynchronizationBarrierInnerShareable();
InstructionMemoryBarrier();
}
void FlushEntireCacheForInit() {
/* Flush data cache. */
{
/* Get levels of coherence/unification. */
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
/* Store cache from L1 up to (level of coherence - 1). */
for (int level = 0; level < levels_of_coherency - 1; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
}
/* Flush cache from (level of coherence - 1) down to L0. */
for (int level = levels_of_coherency; level > 0; --level) {
PerformCacheOperationBySetWayImpl<true>(level - 1, FlushDataCacheLineBySetWayImpl);
}
}
/* Invalidate instruction cache. */
InvalidateEntireInstructionCacheLocalImpl();
EnsureInstructionConsistency();
}
void StoreEntireCacheForInit() {
PerformCacheOperationBySetWayLocal<true>(StoreDataCacheLineBySetWayImpl);
PerformCacheOperationBySetWayShared<true>(StoreDataCacheLineBySetWayImpl);
DataSynchronizationBarrierInnerShareable();
InvalidateEntireInstructionCacheForInit();
/* Invalidate entire TLB. */
InvalidateEntireTlb();
}
void FlushEntireDataCache() {
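
The new StoreEntireCacheForInit and FlushEntireCacheForInit above do their per-level work through PerformCacheOperationBySetWayImpl<true>(level, f), whose body is not part of this diff. Below is a generic sketch of the standard ARMv8 set/way walk such a helper performs for one cache level; the register field positions follow the architecture, while the structure and names are illustrative, and the kernel's Init template parameter is omitted because its effect is not visible here:

#include <cstdint>

/* Generic sketch of a one-level set/way walk (not the kernel's exact code):
   select the level, read its geometry, then apply f to every set/way pair. */
template<typename F>
void PerformCacheOperationBySetWaySketch(int level, F f) {
    /* CSSELR_EL1: level in bits [3:1], bit 0 = 0 selects the data/unified cache. */
    const uint64_t csselr = static_cast<uint64_t>(level) << 1;
    __asm__ __volatile__("msr csselr_el1, %[v]" :: [v]"r"(csselr));
    __asm__ __volatile__("isb");

    /* CCSIDR_EL1: LineSize in bits [2:0], Associativity in [12:3], NumSets in [27:13]. */
    uint64_t ccsidr;
    __asm__ __volatile__("mrs %[v], ccsidr_el1" : [v]"=r"(ccsidr));
    const int line_shift = static_cast<int>(ccsidr & 0x7) + 4;            /* log2(line size in bytes) */
    const int num_ways   = static_cast<int>((ccsidr >> 3) & 0x3FF) + 1;
    const int num_sets   = static_cast<int>((ccsidr >> 13) & 0x7FFF) + 1;

    /* The way index occupies the top bits of the set/way operand. */
    const int way_shift  = (num_ways > 1) ? __builtin_clz(static_cast<unsigned>(num_ways - 1)) : 32;

    for (int way = 0; way < num_ways; ++way) {
        for (int set = 0; set < num_sets; ++set) {
            const uint64_t sw_value = (static_cast<uint64_t>(way) << way_shift)
                                    | (static_cast<uint64_t>(set) << line_shift)
                                    | (static_cast<uint64_t>(level) << 1);
            f(sw_value);  /* e.g. the store/flush line operations sketched earlier */
        }
    }
}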


@@ -383,20 +383,20 @@ _ZN3ams4kern4arch5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv:
/* const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); */
ubfx x10, x10, #0x15, 3
/* int level = levels_of_unification - 1 */
sub w9, w10, #1
/* int level = 0 */
mov x9, xzr
/* while (level >= 0) { */
/* while (level <= levels_of_unification) { */
begin_flush_cache_local_loop:
cmn w9, #1
cmp x9, x10
b.eq done_flush_cache_local_loop
/* FlushEntireDataCacheImplWithoutStack(level); */
mov w0, w9
bl _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv
/* level--; */
sub w9, w9, #1
/* level++; */
add w9, w9, #1
/* } */
b begin_flush_cache_local_loop
@@ -416,23 +416,23 @@ _ZN3ams4kern4arch5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv:
/* CacheLineIdAccessor clidr_el1; */
mrs x10, clidr_el1
/* const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency(); */
ubfx x9, x10, #0x18, 3
ubfx x9, x10, #0x15, 3
/* const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); */
ubfx x10, x10, #0x15, 3
ubfx x10, x10, #0x18, 3
/* int level = levels_of_coherency */
/* int level = levels_of_unification */
/* while (level >= levels_of_unification) { */
/* while (level <= levels_of_coherency) { */
begin_flush_cache_shared_loop:
cmp w10, w9
b.gt done_flush_cache_shared_loop
cmp w9, w10
b.hi done_flush_cache_shared_loop
/* FlushEntireDataCacheImplWithoutStack(level); */
mov w0, w9
bl _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv
/* level--; */
sub w9, w9, #1
/* level++; */
add w9, w9, #1
/* } */
b begin_flush_cache_shared_loop
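
The assembly changes mirror the C++: FlushEntireDataCacheLocalWithoutStack now walks upward from level 0 until it reaches the level of unification, and FlushEntireDataCacheSharedWithoutStack walks from the level of unification up through the level of coherency (both loops previously walked downward). A C++ rendering of the rewritten shared loop, assuming for illustration that the hand-written FlushEntireDataCacheImplWithoutStack routine can be called with the level as an argument, as the inline comments describe:

#include <cstdint>

/* Hypothetical C++ declaration for the hand-written asm routine, for illustration only. */
extern void FlushEntireDataCacheImplWithoutStack(int level);

/* C++ rendering of the rewritten shared-flush loop above. */
void FlushEntireDataCacheSharedWithoutStackSketch() {
    uint64_t clidr;
    __asm__ __volatile__("mrs %[v], clidr_el1" : [v]"=r"(clidr));

    int level                     = static_cast<int>((clidr >> 21) & 0x7);  /* ubfx x9,  clidr, #0x15, 3 (LoUIS) */
    const int levels_of_coherency = static_cast<int>((clidr >> 24) & 0x7);  /* ubfx x10, clidr, #0x18, 3 (LoC)   */

    while (level <= levels_of_coherency) {       /* cmp w9, w10 / b.hi done_flush_cache_shared_loop */
        FlushEntireDataCacheImplWithoutStack(level);
        ++level;                                 /* add w9, w9, #1 */
    }
}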


@@ -59,26 +59,6 @@ namespace ams::kern::init::loader {
}
}
void EnsureEntireDataCacheFlushed() {
/* Flush shared cache. */
cpu::FlushEntireDataCacheSharedForInit();
cpu::DataSynchronizationBarrier();
/* Flush local cache. */
cpu::FlushEntireDataCacheLocalForInit();
cpu::DataSynchronizationBarrier();
/* Flush shared cache. */
cpu::FlushEntireDataCacheSharedForInit();
cpu::DataSynchronizationBarrier();
/* Invalidate entire instruction cache. */
cpu::InvalidateEntireInstructionCacheForInit();
/* Invalidate entire TLB. */
cpu::InvalidateEntireTlb();
}
void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::IPageAllocator &allocator) {
/* Map in an RWX identity mapping for the kernel. */
constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
@@ -109,7 +89,7 @@ namespace ams::kern::init::loader {
PerformBoardSpecificSetup();
/* Ensure that the entire cache is flushed. */
EnsureEntireDataCacheFlushed();
cpu::FlushEntireCacheForInit();
/* Setup SCTLR_EL1. */
/* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing. */
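
For comparison, the loader hunks above collapse the removed EnsureEntireDataCacheFlushed sequence into the single cpu::FlushEntireCacheForInit() call, which per the earlier cpu hunk stores the lower levels, flushes from the level of coherency down, invalidates the instruction cache, and invalidates the TLB. A before/after sketch, with wrapper names chosen here for illustration:

/* Before: kernel_ldr's own sequence (removed in this commit). */
void EnsureEntireDataCacheFlushedBefore() {
    cpu::FlushEntireDataCacheSharedForInit();
    cpu::DataSynchronizationBarrier();
    cpu::FlushEntireDataCacheLocalForInit();
    cpu::DataSynchronizationBarrier();
    cpu::FlushEntireDataCacheSharedForInit();
    cpu::DataSynchronizationBarrier();
    cpu::InvalidateEntireInstructionCacheForInit();
    cpu::InvalidateEntireTlb();
}

/* After: a single call, which per the cpu hunk stores the lower levels, flushes
   from the level of coherency down, invalidates the instruction cache, and
   invalidates the TLB. */
void CacheMaintenanceAfterBoardSetup() {
    cpu::FlushEntireCacheForInit();
}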