kern: switch memset to optimized impl

Michael Scire 2020-07-29 17:45:23 -07:00 committed by SciresM
parent 9ddb4194b3
commit 7352d87b20
5 changed files with 25 additions and 17 deletions


@@ -23,6 +23,17 @@
 namespace ams::kern::arch::arm64::init {
 
+    inline void ClearPhysicalMemory(KPhysicalAddress address, size_t size) {
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, sizeof(u64)));
+
+        /* This Physical Address -> void * conversion is valid, because this is init page table code. */
+        /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
+        volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address));
+
+        for (size_t i = 0; i < size / sizeof(u64); ++i) {
+            ptr[i] = 0;
+        }
+    }
+
     class KInitialPageTable {
         public:
             class IPageAllocator {

@@ -61,9 +72,7 @@ namespace ams::kern::arch::arm64::init {
             }
 
             static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
-                /* This Physical Address -> void * conversion is valid, because this is page table code. */
-                /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
-                std::memset(reinterpret_cast<void *>(GetInteger(address)), 0, PageSize);
+                ClearPhysicalMemory(address, PageSize);
             }
         private:
             size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) {

@@ -705,7 +714,7 @@ namespace ams::kern::arch::arm64::init {
                     this->state.next_address += PageSize;
                 }
 
-                std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
+                ClearPhysicalMemory(allocated, PageSize);
 
                 return allocated;
             }
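
The new ClearPhysicalMemory helper clears memory with a volatile u64 store loop rather than calling memset. A likely reason (not spelled out in the commit message) is that the optimized memset enabled later in this commit may use DC ZVA and other cache-oriented stores, which are not safe while the MMU is still off and all data accesses behave as device memory. A minimal standalone sketch of the same pattern, with the kernel-specific types and macros (KPhysicalAddress, GetInteger, MESOSPHERE_INIT_ABORT_UNLESS) swapped for standard equivalents:

#include <cassert>
#include <cstddef>
#include <cstdint>

/* Standalone sketch of the word-wise clear used above; the kernel-specific */
/* names are replaced with standard ones for illustration only.             */
inline void ClearMemoryWords(void *address, size_t size) {
    /* The size must be a multiple of the word size, as in the kernel code. */
    assert(size % sizeof(uint64_t) == 0);

    /* Volatile stores keep the compiler from collapsing the loop back into */
    /* a call to memset, so the clear never goes through the optimized      */
    /* library routine.                                                     */
    volatile uint64_t *ptr = static_cast<volatile uint64_t *>(address);
    for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
        ptr[i] = 0;
    }
}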


@@ -76,11 +76,13 @@ L(set96):
         .p2align 4
 L(set_long):
         stp     val, val, [dstin]
-        bic     dst, dstin, 15
 #if DC_ZVA_THRESHOLD
         cmp     count, DC_ZVA_THRESHOLD
         ccmp    val, 0, 0, cs
+        bic     dst, dstin, 15
         b.eq    L(zva_64)
+#else
+        bic     dst, dstin, 15
 #endif
         /* Small-size or non-zero memset does not use DC ZVA.  */
         sub     count, dstend, dst
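
For context, the sequence at L(set_long) only takes the DC ZVA route when the fill value is zero and the length is at or above DC_ZVA_THRESHOLD; the change moves the bic alignment step after the cmp/ccmp pair on ZVA-capable builds and keeps it under #else otherwise, so dst is aligned the same way on every path. A rough C++ rendering of that dispatch (the threshold value and helper name below are made up for illustration; the real fast path issues DC ZVA instructions):

#include <cstddef>
#include <cstdint>
#include <cstring>

/* Hypothetical stand-in for DC_ZVA_THRESHOLD in the assembly. */
constexpr size_t DcZvaThreshold = 256;

/* Placeholder for the L(zva_64) path, which zeroes whole cache lines with  */
/* DC ZVA in the real routine.                                              */
static void ZeroWithDcZva(uint8_t *dst, size_t count) {
    std::memset(dst, 0, count);
}

/* Rough C++ rendering of the cmp/ccmp/b.eq dispatch at L(set_long):        */
/* only large, all-zero fills take the DC ZVA path.                         */
void SetLong(uint8_t *dst, uint8_t value, size_t count) {
    if (count >= DcZvaThreshold && value == 0) {
        ZeroWithDcZva(dst, count);            /* b.eq L(zva_64)             */
    } else {
        for (size_t i = 0; i < count; ++i) {  /* fall through to the stores */
            dst[i] = value;
        }
    }
}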


@@ -19,6 +19,6 @@
 #define MESOSPHERE_LIBC_MEMCPY_GENERIC  0
 #define MESOSPHERE_LIBC_MEMCMP_GENERIC  0
 #define MESOSPHERE_LIBC_MEMMOVE_GENERIC 0
-#define MESOSPHERE_LIBC_MEMSET_GENERIC  1
+#define MESOSPHERE_LIBC_MEMSET_GENERIC  0
 #define MESOSPHERE_LIBC_STRNCPY_GENERIC 1
 #define MESOSPHERE_LIBC_STRNCMP_GENERIC 1
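
Flipping MESOSPHERE_LIBC_MEMSET_GENERIC from 1 to 0 is what switches the kernel from the generic C memset to the optimized assembly routine patched above. A hypothetical illustration of how such a flag can gate which implementation gets compiled (this is not the actual Atmosphère build wiring, just the general pattern):

#include <cstddef>

#define MESOSPHERE_LIBC_MEMSET_GENERIC 0

#if MESOSPHERE_LIBC_MEMSET_GENERIC
/* Portable byte-at-a-time fallback, compiled only when the flag is set.    */
extern "C" void *memset(void *dst, int value, size_t size) {
    unsigned char *p = static_cast<unsigned char *>(dst);
    for (size_t i = 0; i < size; ++i) {
        p[i] = static_cast<unsigned char>(value);
    }
    return dst;
}
#else
/* With the flag cleared, only the declaration remains here; the optimized  */
/* assembly implementation is linked in instead.                            */
extern "C" void *memset(void *dst, int value, size_t size);
#endif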


@@ -241,9 +241,6 @@ namespace ams::kern::init::loader {
         RelocateKernelPhysically(base_address, layout);
 
         /* Validate kernel layout. */
-        /* TODO: constexpr 0x1000 definition somewhere. */
-        /* In stratosphere, this is os::MemoryPageSize. */
-        /* We don't have ams::os, this may go in hw:: or something. */
         const uintptr_t rx_offset      = layout->rx_offset;
         const uintptr_t rx_end_offset  = layout->rx_end_offset;
         const uintptr_t ro_offset      = layout->ro_offset;

@@ -251,12 +248,12 @@ namespace ams::kern::init::loader {
         const uintptr_t rw_offset      = layout->rw_offset;
         /* UNUSED: const uintptr_t rw_end_offset  = layout->rw_end_offset; */
         const uintptr_t bss_end_offset = layout->bss_end_offset;
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_offset,      0x1000));
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_end_offset,  0x1000));
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_offset,      0x1000));
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_end_offset,  0x1000));
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset,      0x1000));
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, 0x1000));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_offset,      PageSize));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_end_offset,  PageSize));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_offset,      PageSize));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_end_offset,  PageSize));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset,      PageSize));
+        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, PageSize));
         const uintptr_t bss_offset      = layout->bss_offset;
         const uintptr_t ini_load_offset = layout->ini_load_offset;
         const uintptr_t dynamic_offset  = layout->dynamic_offset;
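
The alignment checks now use PageSize instead of the literal 0x1000 that the removed TODO comments were complaining about; both denote the 4 KiB page size. A simplified sketch of what a power-of-two alignment check like util::IsAligned boils down to (the real helper is more general; the sample addresses here are for illustration only):

#include <cstddef>
#include <cstdint>

constexpr size_t PageSize = 0x1000; /* 4 KiB, the value the literal encoded. */

/* Simplified sketch of a power-of-two alignment test: the low bits must    */
/* all be zero.                                                             */
constexpr bool IsAligned(uintptr_t value, size_t alignment) {
    return (value & (alignment - 1)) == 0;
}

static_assert( IsAligned(0x80000, PageSize));
static_assert(!IsAligned(0x80010, PageSize));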