mirror of https://github.com/Atmosphere-NX/Atmosphere

kernel_ldr: finish implementing all core logic.

parent 623b5f4eb9, commit 8efdd04fcd

20 changed files with 854 additions and 81 deletions
libraries/config/arch/arm64/cpu/cortex_a57/cpu.mk (new file, 5 lines)
@@ -0,0 +1,5 @@
+export ATMOSPHERE_DEFINES += -DATMOSPHERE_CPU_ARM_CORTEX_A57
+export ATMOSPHERE_SETTINGS += -mtune=cortex-a57
+export ATMOSPHERE_CFLAGS +=
+export ATMOSPHERE_CXXFLAGS +=
+export ATMOSPHERE_ASFLAGS +=
@@ -1,5 +1,5 @@
 export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_NINTENDO_SWITCH -D__SWITCH__
-export ATMOSPHERE_SETTINGS += -mtune=cortex-a57
+export ATMOSPHERE_SETTINGS +=
 export ATMOSPHERE_CFLAGS +=
 export ATMOSPHERE_CXXFLAGS +=
 export ATMOSPHERE_ASFLAGS +=
@@ -9,6 +9,10 @@ ifeq ($(strip $(ATMOSPHERE_BOARD)),)
 export ATMOSPHERE_BOARD := nx-hac-001
 endif
 
+ifeq ($(strip $(ATMOSPHERE_CPU)),)
+export ATMOSPHERE_CPU := arm-cortex-a57
+endif
+
 export ATMOSPHERE_DEFINES := -DATMOSPHERE
 export ATMOSPHERE_SETTINGS := -fPIE -g
 export ATMOSPHERE_CFLAGS := -Wall -ffunction-sections -fdata-sections -fno-strict-aliasing -fwrapv \
@@ -27,14 +31,21 @@ export ATMOSPHERE_BOARD_NAME := nintendo_switch
 export ATMOSPHERE_OS_NAME := horizon
 endif
 
+ifeq ($(ATMOSPHERE_CPU),arm-cortex-a57)
+export ATMOSPHERE_CPU_DIR := arch/arm64/cpu/cortex_a57
+export ATMOSPHERE_CPU_NAME := arm_cortex_a57
+endif
+
 export ATMOSPHERE_ARCH_MAKE_DIR  := $(ATMOSPHERE_CONFIG_MAKE_DIR)/$(ATMOSPHERE_ARCH_DIR)
 export ATMOSPHERE_BOARD_MAKE_DIR := $(ATMOSPHERE_CONFIG_MAKE_DIR)/$(ATMOSPHERE_BOARD_DIR)
 export ATMOSPHERE_OS_MAKE_DIR    := $(ATMOSPHERE_CONFIG_MAKE_DIR)/$(ATMOSPHERE_OS_DIR)
+export ATMOSPHERE_CPU_MAKE_DIR   := $(ATMOSPHERE_CONFIG_MAKE_DIR)/$(ATMOSPHERE_CPU_DIR)
 
 include $(ATMOSPHERE_ARCH_MAKE_DIR)/arch.mk
 include $(ATMOSPHERE_BOARD_MAKE_DIR)/board.mk
 include $(ATMOSPHERE_OS_MAKE_DIR)/os.mk
+include $(ATMOSPHERE_CPU_MAKE_DIR)/cpu.mk
 
 #---------------------------------------------------------------------------------
 # get atmosphere git revision information
@@ -25,6 +25,9 @@
 #include "mesosphere/kern_k_typed_address.hpp"
 #include "mesosphere/kern_initial_process.hpp"
 
+/* Core pre-initialization includes. */
+#include "mesosphere/kern_select_cpu.hpp"
+
 /* Initialization headers. */
 #include "mesosphere/init/kern_init_elf.hpp"
 #include "mesosphere/init/kern_init_layout.hpp"
@@ -17,7 +17,7 @@
 #include <vapours.hpp>
 #include <mesosphere/kern_panic.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
-#include "../kern_hardware_registers.hpp"
+#include "../kern_cpu.hpp"
 
 namespace ams::kern::init {
 
@@ -93,10 +93,17 @@ namespace ams::kern::init {
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
             constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
 
+            /* Should not be called except by derived classes. */
+            constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
+                return this->attributes;
+            }
+
     };
 
     static_assert(sizeof(PageTableEntry) == sizeof(u64));
 
+    constexpr PageTableEntry InvalidPageTableEntry = PageTableEntry(0);
+
     constexpr size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
 
     class L1PageTableEntry : public PageTableEntry {
@@ -106,6 +113,7 @@ namespace ams::kern::init {
            {
                /* ... */
            }

            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
            {
@@ -115,9 +123,15 @@ namespace ams::kern::init {
             constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                 return this->SelectBits(30, 18);
             }
 
             constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                 return this->SelectBits(12, 36);
             }
 
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes();
+            }
     };
 
     class L2PageTableEntry : public PageTableEntry {
@@ -127,6 +141,7 @@ namespace ams::kern::init {
            {
                /* ... */
            }

            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
            {
@@ -136,9 +151,15 @@ namespace ams::kern::init {
             constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                 return this->SelectBits(21, 27);
             }
 
             constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                 return this->SelectBits(12, 36);
             }
 
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes();
+            }
     };
 
     class L3PageTableEntry : public PageTableEntry {
@@ -149,12 +170,16 @@ namespace ams::kern::init {
                 /* ... */
             }
 
             constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }
 
             constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                 return this->SelectBits(21, 27);
             }
             constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                 return this->SelectBits(12, 36);
             }
 
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes();
+            }
     };
 
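Note on the IsCompatibleWithAttribute() checks added above: a block descriptor's raw value is just its output-address bits OR'd with its attribute bits, so OR-ing the entry's own block address with the desired attribute bits reproduces the stored raw value exactly when the attributes match. A standalone sketch of the idea (simplified 2MB-block layout and hypothetical attribute values; not Atmosphere code):

    #include <cstdint>
    #include <cassert>

    int main() {
        constexpr uint64_t AddrMask    = 0x0000FFFFFFE00000ull; /* 2MB block output-address bits. */
        constexpr uint64_t DesiredAttr = 0x711ull;              /* Hypothetical attribute bits.   */

        const uint64_t raw   = (0x80200000ull & AddrMask) | DesiredAttr; /* A stored block entry.   */
        const uint64_t block = raw & AddrMask;                           /* What GetBlock() yields. */

        assert((block | DesiredAttr) == raw); /* Same attributes: compatible.        */
        assert((block | 0x311ull)    != raw); /* Different attributes: incompatible. */
        return 0;
    }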
@@ -171,6 +196,10 @@ namespace ams::kern::init {
             constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) {
                 ClearNewPageTable(this->l1_table);
             }
 
+            constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
+                return GetInteger(this->l1_table);
+            }
+
         private:
             static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
                 L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(_l1_table));
@@ -193,7 +222,7 @@ namespace ams::kern::init {
                 std::memset(reinterpret_cast<void *>(GetInteger(address)), 0, PageSize);
             }
         public:
-            void Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
+            void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
                 /* Ensure that addresses and sizes are page aligned. */
                 MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
                 MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
@@ -206,7 +235,8 @@ namespace ams::kern::init {
                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && util::IsAligned(size, L1BlockSize)) {
                         *l1_entry = L1PageTableEntry(phys_addr, attr, false);
-                        /* TODO: DataSynchronizationBarrier */
+                        cpu::DataSynchronizationBarrierInnerShareable();
+
                         virt_addr += L1BlockSize;
                         phys_addr += L1BlockSize;
                         size -= L1BlockSize;
@@ -218,6 +248,7 @@ namespace ams::kern::init {
                     KPhysicalAddress new_table = allocator.Allocate();
                     ClearNewPageTable(new_table);
                     *l1_entry = L1PageTableEntry(phys_addr, attr.IsPrivilegedExecuteNever());
+                    cpu::DataSynchronizationBarrierInnerShareable();
                 }
 
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
@@ -226,10 +257,11 @@ namespace ams::kern::init {
                     if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && util::IsAligned(size, L2ContiguousBlockSize)) {
                         for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                             l2_entry[i] = L2PageTableEntry(phys_addr, attr, true);
-                            /* TODO: DataSynchronizationBarrier */
-                            virt_addr += L2ContiguousBlockSize;
-                            phys_addr += L2ContiguousBlockSize;
-                            size -= L2ContiguousBlockSize;
+                            cpu::DataSynchronizationBarrierInnerShareable();
+
+                            virt_addr += L2BlockSize;
+                            phys_addr += L2BlockSize;
+                            size -= L2BlockSize;
                         }
                         continue;
                     }
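Note: the Map() loop always tries the largest granule whose alignment preconditions hold, falling from 1GB L1 blocks through 32MB contiguous L2 runs and 2MB L2 blocks down to 64KB contiguous L3 runs and 4KB pages. A sketch of just that size-selection rule (constants follow the standard AArch64 4KB-granule layout; not code from the commit):

    #include <cstdint>
    #include <initializer_list>

    constexpr uint64_t L1BlockSize           = 1ull << 30; /* 1GB  */
    constexpr uint64_t L2ContiguousBlockSize = 1ull << 25; /* 32MB: 16 contiguous 2MB entries */
    constexpr uint64_t L2BlockSize           = 1ull << 21; /* 2MB  */
    constexpr uint64_t L3ContiguousBlockSize = 1ull << 16; /* 64KB: 16 contiguous 4KB entries */
    constexpr uint64_t L3BlockSize           = 1ull << 12; /* 4KB  */

    constexpr bool Aligned(uint64_t v, uint64_t a) { return (v & (a - 1)) == 0; }

    /* Largest granule usable for the next step, mirroring the cascade in Map(). */
    constexpr uint64_t NextGranule(uint64_t virt, uint64_t phys, uint64_t size) {
        for (uint64_t g : { L1BlockSize, L2ContiguousBlockSize, L2BlockSize, L3ContiguousBlockSize }) {
            if (Aligned(virt, g) && Aligned(phys, g) && Aligned(size, g)) {
                return g;
            }
        }
        return L3BlockSize;
    }

    /* A 6MB mapping at 2MB-aligned addresses proceeds in 2MB steps. */
    static_assert(NextGranule(0x80200000, 0x80200000, 6ull << 20) == L2BlockSize);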
@@ -237,7 +269,8 @@ namespace ams::kern::init {
                     /* Can we make an L2 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && util::IsAligned(size, L2BlockSize)) {
                         *l2_entry = L2PageTableEntry(phys_addr, attr, false);
-                        /* TODO: DataSynchronizationBarrier */
+                        cpu::DataSynchronizationBarrierInnerShareable();
+
                         virt_addr += L2BlockSize;
                         phys_addr += L2BlockSize;
                         size -= L2BlockSize;
@@ -249,6 +282,7 @@ namespace ams::kern::init {
                     KPhysicalAddress new_table = allocator.Allocate();
                     ClearNewPageTable(new_table);
                     *l2_entry = L2PageTableEntry(phys_addr, attr.IsPrivilegedExecuteNever());
+                    cpu::DataSynchronizationBarrierInnerShareable();
                 }
 
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
@@ -257,17 +291,18 @@ namespace ams::kern::init {
                     if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && util::IsAligned(size, L3ContiguousBlockSize)) {
                         for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                             l3_entry[i] = L3PageTableEntry(phys_addr, attr, true);
-                            /* TODO: DataSynchronizationBarrier */
-                            virt_addr += L3ContiguousBlockSize;
-                            phys_addr += L3ContiguousBlockSize;
-                            size -= L3ContiguousBlockSize;
+                            cpu::DataSynchronizationBarrierInnerShareable();
+
+                            virt_addr += L3BlockSize;
+                            phys_addr += L3BlockSize;
+                            size -= L3BlockSize;
                         }
                         continue;
                     }
 
                     /* Make an L3 block. */
                     *l3_entry = L3PageTableEntry(phys_addr, attr, false);
-                    /* TODO: DataSynchronizationBarrier */
+                    cpu::DataSynchronizationBarrierInnerShareable();
                     virt_addr += L3BlockSize;
                     phys_addr += L3BlockSize;
                     size -= L3BlockSize;
@@ -275,12 +310,187 @@ namespace ams::kern::init {
                 }
 
             bool IsFree(KVirtualAddress virt_addr, size_t size) {
-                /* TODO */
-                return false;
+                /* Ensure that addresses and sizes are page aligned. */
+                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+
+                const KVirtualAddress end_virt_addr = virt_addr + size;
+                while (virt_addr < end_virt_addr) {
+                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                    /* If an L1 block is mapped, the address isn't free. */
+                    if (l1_entry->IsBlock()) {
+                        return false;
+                    }
+
+                    if (!l1_entry->IsTable()) {
+                        /* Not a table, so just move to check the next region. */
+                        virt_addr = util::AlignDown(GetInteger(virt_addr) + L1BlockSize, L1BlockSize);
+                        continue;
+                    }
+
+                    /* Table, so check if we're mapped in L2. */
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                    if (l2_entry->IsBlock()) {
+                        return false;
+                    }
+
+                    if (!l2_entry->IsTable()) {
+                        /* Not a table, so just move to check the next region. */
+                        virt_addr = util::AlignDown(GetInteger(virt_addr) + L2BlockSize, L2BlockSize);
+                        continue;
+                    }
+
+                    /* Table, so check if we're mapped in L3. */
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                    if (l3_entry->IsBlock()) {
+                        return false;
+                    }
+
+                    /* Not a block, so move on to check the next page. */
+                    virt_addr = util::AlignDown(GetInteger(virt_addr) + L3BlockSize, L3BlockSize);
+                }
+                return true;
             }
 
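The new IsFree() walk is what makes randomized placement possible: a caller proposes a virtual range and retries until the whole range is unmapped. A tiny illustrative probe loop (linear rather than random; purely a usage sketch, assuming an initialized KInitialPageTable):

    KVirtualAddress PickFreeBase(KInitialPageTable &pt, KVirtualAddress start, size_t size) {
        KVirtualAddress candidate = start;
        while (!pt.IsFree(candidate, size)) {
            candidate += 0x200000; /* Try the next 2MB-aligned slot. */
        }
        return candidate;
    }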
-            void ReprotectFromReadWriteToRead(KVirtualAddress virt_addr, size_t size) {
-                /* TODO */
+            void Reprotect(KVirtualAddress virt_addr, size_t size, const PageTableEntry &attr_before, const PageTableEntry &attr_after) {
+                /* Ensure data consistency before we begin reprotection. */
+                cpu::DataSynchronizationBarrierInnerShareable();
+
+                /* Ensure that addresses and sizes are page aligned. */
+                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+
+                /* Iteratively reprotect pages until the requested region is reprotected. */
+                while (size > 0) {
+                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                    /* Check if an L1 block is present. */
+                    if (l1_entry->IsBlock()) {
+                        /* Ensure that we are allowed to have an L1 block here. */
+                        const KPhysicalAddress block = l1_entry->GetBlock();
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
+                        MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before));
+
+                        /* Invalidate the existing L1 block. */
+                        *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
+                        cpu::DataSynchronizationBarrierInnerShareable();
+                        cpu::InvalidateEntireTlb();
+
+                        /* Create new L1 block. */
+                        *l1_entry = L1PageTableEntry(block, attr_after, false);
+
+                        virt_addr += L1BlockSize;
+                        size -= L1BlockSize;
+                        continue;
+                    }
+
+                    /* Not a block, so we must be a table. */
+                    MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable());
+
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+                    if (l2_entry->IsBlock()) {
+                        const KPhysicalAddress block = l2_entry->GetBlock();
+
+                        if (l2_entry->IsContiguous()) {
+                            /* Ensure that we are allowed to have a contiguous L2 block here. */
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
+
+                            /* Invalidate the existing contiguous L2 block. */
+                            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                                /* Ensure that the entry is valid. */
+                                MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before));
+                                static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
+                            }
+                            cpu::DataSynchronizationBarrierInnerShareable();
+                            cpu::InvalidateEntireTlb();
+
+                            /* Create a new contiguous L2 block. */
+                            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                                l2_entry[i] = L2PageTableEntry(block + L2BlockSize * i, attr_after, true);
+                            }
+
+                            virt_addr += L2ContiguousBlockSize;
+                            size -= L2ContiguousBlockSize;
+                        } else {
+                            /* Ensure that we are allowed to have an L2 block here. */
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
+                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
+                            MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before));
+
+                            /* Invalidate the existing L2 block. */
+                            *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
+                            cpu::DataSynchronizationBarrierInnerShareable();
+                            cpu::InvalidateEntireTlb();
+
+                            /* Create new L2 block. */
+                            *l2_entry = L2PageTableEntry(block, attr_after, false);
+
+                            virt_addr += L2BlockSize;
+                            size -= L2BlockSize;
+                        }
+
+                        continue;
+                    }
+
+                    /* Not a block, so we must be a table. */
+                    MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable());
+
+                    /* We must have a mapped l3 entry to reprotect. */
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+                    MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock());
+                    const KPhysicalAddress block = l3_entry->GetBlock();
+
+                    if (l3_entry->IsContiguous()) {
+                        /* Ensure that we are allowed to have a contiguous L3 block here. */
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
+
+                        /* Invalidate the existing contiguous L3 block. */
+                        for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                            /* Ensure that the entry is valid. */
+                            MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before));
+                            static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
+                        }
+                        cpu::DataSynchronizationBarrierInnerShareable();
+                        cpu::InvalidateEntireTlb();
+
+                        /* Create a new contiguous L3 block. */
+                        for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                            l3_entry[i] = L3PageTableEntry(block + L3BlockSize * i, attr_after, true);
+                        }
+
+                        virt_addr += L3ContiguousBlockSize;
+                        size -= L3ContiguousBlockSize;
+                    } else {
+                        /* Ensure that we are allowed to have an L3 block here. */
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
+                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
+                        MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before));
+
+                        /* Invalidate the existing L3 block. */
+                        *static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
+                        cpu::DataSynchronizationBarrierInnerShareable();
+                        cpu::InvalidateEntireTlb();
+
+                        /* Create new L3 block. */
+                        *l3_entry = L3PageTableEntry(block, attr_after, false);
+
+                        virt_addr += L3BlockSize;
+                        size -= L3BlockSize;
+                    }
+                }
+
+                /* Ensure data consistency after we complete reprotection. */
+                cpu::DataSynchronizationBarrierInnerShareable();
             }
 
     };
 
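Reprotect() applies the ARM break-before-make discipline to every block it touches: write an invalid descriptor, barrier, invalidate the TLB, then write the new descriptor. A generic standalone sketch of that sequence (the fence is a stand-in for dsb ish and the TLB maintenance is stubbed; illustrative only):

    #include <atomic>
    #include <cstdint>

    void BreakBeforeMake(volatile uint64_t *entry, uint64_t new_desc) {
        *entry = 0;                                           /* 1. Break: install an invalid descriptor. */
        std::atomic_thread_fence(std::memory_order_seq_cst);  /* 2. Stand-in for dsb ish.                 */
        /* 3. TLB invalidate goes here (tlbi vmalle1is; dsb; isb in the real code).                       */
        *entry = new_desc;                                    /* 4. Make: install the new descriptor.     */
    }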
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+#include "kern_cpu_system_registers.hpp"
+
+namespace ams::kern::arm64::cpu {
+
+    /* Helpers for managing memory state. */
+    ALWAYS_INLINE void DataSynchronizationBarrier() {
+        __asm__ __volatile__("dsb sy");
+    }
+
+    ALWAYS_INLINE void DataSynchronizationBarrierInnerShareable() {
+        __asm__ __volatile__("dsb ish");
+    }
+
+    ALWAYS_INLINE void DataMemoryBarrier() {
+        __asm__ __volatile__("dmb sy");
+    }
+
+    ALWAYS_INLINE void InstructionMemoryBarrier() {
+        __asm__ __volatile__("isb");
+    }
+
+    ALWAYS_INLINE void EnsureInstructionConsistency() {
+        DataSynchronizationBarrier();
+        InstructionMemoryBarrier();
+    }
+
+    ALWAYS_INLINE void InvalidateEntireInstructionCache() {
+        __asm__ __volatile__("ic iallu" ::: "memory");
+        EnsureInstructionConsistency();
+    }
+
+    /* Cache management helpers. */
+    void FlushEntireDataCacheShared();
+    void FlushEntireDataCacheLocal();
+
+    ALWAYS_INLINE void InvalidateEntireTlb() {
+        __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
+        EnsureInstructionConsistency();
+    }
+
+}
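These helpers compose: later in this commit, kernelldr writes SCTLR_EL1 and then calls EnsureInstructionConsistency(), i.e. dsb sy followed by isb, so subsequent instructions execute under the new configuration. The pattern, as used there:

    cpu::SetSctlrEl1(SctlrValue);          /* msr sctlr_el1, ... */
    cpu::EnsureInstructionConsistency();   /* dsb sy; isb        */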
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::kern::arm64::cpu {
+
+#define MESOSPHERE_CPU_GET_SYSREG(name) \
+    ({ \
+        u64 temp_value; \
+        __asm__ __volatile__("mrs %0, " #name "" : "=&r"(temp_value) :: "memory"); \
+        temp_value; \
+    })
+
+#define MESOSPHERE_CPU_SET_SYSREG(name, value) \
+    ({ \
+        __asm__ __volatile__("msr " #name ", %0" :: "r"(value) : "memory", "cc"); \
+    })
+
+#define MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(name, reg_name) \
+    ALWAYS_INLINE void Set##name(u64 value) { MESOSPHERE_CPU_SET_SYSREG(reg_name, value); } \
+    ALWAYS_INLINE u64 Get##name() { return MESOSPHERE_CPU_GET_SYSREG(reg_name); }
+
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)
+
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
+
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
+
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuActlrEl1, s3_1_c15_c2_0)
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuEctlrEl1, s3_1_c15_c2_1)
+
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)
+
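Each MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(...) line above expands to a Get/Set pair; the Ttbr0El1 instantiation, for example, is equivalent to:

    ALWAYS_INLINE void SetTtbr0El1(u64 value) {
        __asm__ __volatile__("msr ttbr0_el1, %0" :: "r"(value) : "memory", "cc");
    }
    ALWAYS_INLINE u64 GetTtbr0El1() {
        u64 temp_value;
        __asm__ __volatile__("mrs %0, ttbr0_el1" : "=&r"(temp_value) :: "memory");
        return temp_value;
    }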
+    /* Base class for register accessors. */
+    class GenericRegisterAccessor {
+        private:
+            u64 value;
+        public:
+            ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ }
+        protected:
+            constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
+                return (this->value >> offset) & ((1 << count) - 1);
+            }
+    };
+
+    /* Special code for main id register. */
+    class MainIdRegisterAccessor : public GenericRegisterAccessor {
+        public:
+            enum class Implementer {
+                ArmLimited = 0x41,
+            };
+            enum class PrimaryPartNumber {
+                CortexA53 = 0xD03,
+                CortexA57 = 0xD07,
+            };
+        public:
+            ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ }
+        public:
+            constexpr ALWAYS_INLINE Implementer GetImplementer() const {
+                return static_cast<Implementer>(this->GetBits(24, 8));
+            }
+
+            constexpr ALWAYS_INLINE u64 GetVariant() const {
+                return this->GetBits(20, 4);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetArchitecture() const {
+                return this->GetBits(16, 4);
+            }
+
+            constexpr ALWAYS_INLINE PrimaryPartNumber GetPrimaryPartNumber() const {
+                return static_cast<PrimaryPartNumber>(this->GetBits(4, 12));
+            }
+
+            constexpr ALWAYS_INLINE u64 GetRevision() const {
+                return this->GetBits(0, 4);
+            }
+    };
+
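MIDR_EL1 packs implementer[31:24], variant[23:20], architecture[19:16], part number[15:4], and revision[3:0]. As a worked example, a Cortex-A57 r1p1 reads 0x411FD071, which the accessor above decodes as:

    /* MIDR_EL1 == 0x411FD071 (Cortex-A57 r1p1):                          */
    /*   GetImplementer()       -> 0x41  == Implementer::ArmLimited       */
    /*   GetVariant()           -> 0x1   (the "r1" in r1p1)               */
    /*   GetArchitecture()      -> 0xF   (features defined by ID regs)    */
    /*   GetPrimaryPartNumber() -> 0xD07 == PrimaryPartNumber::CortexA57  */
    /*   GetRevision()          -> 0x1   (the "p1" in r1p1)               */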
+    /* Accessors for cache registers. */
+    class CacheLineIdAccessor : public GenericRegisterAccessor {
+        public:
+            ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ }
+        public:
+            constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
+                return static_cast<int>(this->GetBits(24, 3));
+            }
+
+            constexpr ALWAYS_INLINE int GetLevelsOfUnification() const {
+                return static_cast<int>(this->GetBits(21, 3));
+            }
+
+            /* TODO: Other bitfield accessors? */
+    };
+
+    class CacheSizeIdAccessor : public GenericRegisterAccessor {
+        public:
+            ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ }
+        public:
+            constexpr ALWAYS_INLINE int GetNumberOfSets() const {
+                return static_cast<int>(this->GetBits(13, 15));
+            }
+
+            constexpr ALWAYS_INLINE int GetAssociativity() const {
+                return static_cast<int>(this->GetBits(3, 10));
+            }
+
+            constexpr ALWAYS_INLINE int GetLineSize() const {
+                return static_cast<int>(this->GetBits(0, 3));
+            }
+
+            /* TODO: Other bitfield accessors? */
+    };
+
+#undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
+#undef MESOSPHERE_CPU_GET_SYSREG
+#undef MESOSPHERE_CPU_SET_SYSREG
+
+}
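CCSIDR_EL1 fields are minus-one encoded (and the line size is log2 minus 4), so actual geometry is sets = GetNumberOfSets() + 1, ways = GetAssociativity() + 1, line bytes = 1 << (GetLineSize() + 4); this is also why the flush loops in kern_cpu.cpp below iterate with <=. Worked example for a 32KB, 2-way, 64-byte-line L1 data cache (the Cortex-A57 configuration):

    CacheSizeIdAccessor ccsidr_el1;                        /* After selecting the L1 D-cache. */
    const int sets = ccsidr_el1.GetNumberOfSets() + 1;     /* 255 + 1 = 256                   */
    const int ways = ccsidr_el1.GetAssociativity() + 1;    /* 1 + 1   = 2                     */
    const int line = 1 << (ccsidr_el1.GetLineSize() + 4);  /* 1 << 6  = 64 bytes              */
    /* 256 sets * 2 ways * 64 bytes = 32KB. */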
@@ -1,47 +0,0 @@ (file deleted)
-/*
- * Copyright (c) 2018-2019 Atmosphère-NX
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-/*
-   From musl include/elf.h
-
-   Copyright © 2005-2014 Rich Felker, et al.
-
-   Permission is hereby granted, free of charge, to any person obtaining
-   a copy of this software and associated documentation files (the
-   "Software"), to deal in the Software without restriction, including
-   without limitation the rights to use, copy, modify, merge, publish,
-   distribute, sublicense, and/or sell copies of the Software, and to
-   permit persons to whom the Software is furnished to do so, subject to
-   the following conditions:
-
-   The above copyright notice and this permission notice shall be
-   included in all copies or substantial portions of the Software.
-
-   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-   IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-   CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-   TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-   SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-#pragma once
-#include <vapours.hpp>
-
-namespace ams::kern::hw {
-
-
-
-}
@@ -25,6 +25,10 @@ namespace ams::kern {
             static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
             static bool ShouldIncreaseResourceRegionSize();
 
+            /* Randomness. */
+            static void GenerateRandomBytes(void *dst, size_t size);
+            static u64 GenerateRandomRange(u64 min, u64 max);
+
             /* Panic. */
             static NORETURN void StopSystem();
     };
 
@@ -28,7 +28,7 @@ namespace ams::kern::init {
         u32 bss_offset;
         u32 bss_end_offset;
         u32 ini_end_offset;
-        u32 dynamic_end_offset;
+        u32 dynamic_offset;
         u32 init_array_offset;
         u32 init_array_end_offset;
     };
 
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#ifdef ATMOSPHERE_ARCH_ARM64
+    #include "arch/arm64/kern_cpu.hpp"
+
+    namespace ams::kern::cpu {
+
+        using namespace ams::kern::arm64::cpu;
+
+    }
+
+#else
+    #error "Unknown architecture for CPU"
+#endif
libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp (new file, 69 lines)
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern::arm64::cpu {
+
+    namespace {
+
+        void FlushEntireDataCacheImpl(int level) {
+            /* Used in multiple locations. */
+            const u64 level_sel_value = static_cast<u64>(level << 1);
+
+            /* Set selection register. */
+            cpu::SetCsselrEl1(level_sel_value);
+            cpu::InstructionMemoryBarrier();
+
+            /* Get cache size id info. */
+            CacheSizeIdAccessor ccsidr_el1;
+            const int num_sets  = ccsidr_el1.GetNumberOfSets();
+            const int num_ways  = ccsidr_el1.GetAssociativity();
+            const int line_size = ccsidr_el1.GetLineSize();
+
+            const u64 way_shift = static_cast<u64>(__builtin_clz(num_ways));
+            const u64 set_shift = static_cast<u64>(line_size + 4);
+
+            for (int way = 0; way <= num_ways; way++) {
+                for (int set = 0; set <= num_sets; set++) {
+                    const u64 way_value  = static_cast<u64>(way) << way_shift;
+                    const u64 set_value  = static_cast<u64>(set) << set_shift;
+                    const u64 cisw_value = way_value | set_value | level_sel_value;
+                    __asm__ __volatile__("dc cisw, %0" ::"r"(cisw_value) : "memory");
+                }
+            }
+        }
+
+    }
+
+    void FlushEntireDataCacheShared() {
+        CacheLineIdAccessor clidr_el1;
+        const int levels_of_coherency   = clidr_el1.GetLevelsOfCoherency();
+        const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
+
+        for (int level = levels_of_coherency; level >= levels_of_unification; level--) {
+            FlushEntireDataCacheImpl(level);
+        }
+    }
+
+    void FlushEntireDataCacheLocal() {
+        CacheLineIdAccessor clidr_el1;
+        const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
+
+        for (int level = levels_of_unification - 1; level >= 0; level--) {
+            FlushEntireDataCacheImpl(level);
+        }
+    }
+
+}
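The dc cisw operand packs level, way, and set into one register: level in bits [3:1], the way left-justified so its top bit lands at bit 31 (hence way_shift = clz(num_ways)), and the set shifted past the line-offset bits (set_shift = log2(line bytes)). Worked example for a 16-way, 64-byte-line, 2MB L2 (a plausible Cortex-A57 configuration; values are illustrative):

    #include <cstdint>

    int main() {
        const int level     = 1;    /* L2                                   */
        const int num_ways  = 15;   /* CCSIDR associativity field: ways - 1 */
        const int line_size = 2;    /* CCSIDR line-size field: log2(64) - 4 */

        const uint64_t level_sel = static_cast<uint64_t>(level << 1);  /* bits [3:1]          */
        const uint64_t way_shift = __builtin_clz(num_ways);            /* 28: ways at [31:28] */
        const uint64_t set_shift = line_size + 4;                      /* 6: sets at [16:6]   */

        const uint64_t cisw = (15ull << way_shift) | (2047ull << set_shift) | level_sel;
        return cisw == 0xF001FFC2ull ? 0 : 1;  /* way 15, set 2047, level 1 */
    }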
@@ -39,6 +39,12 @@ namespace ams::kern {
            return value;
        }
 
+        inline u64 GenerateRandomU64() {
+            u64 value;
+            smc::GenerateRandomBytes(&value, sizeof(value));
+            return value;
+        }
+
         inline smc::MemoryMode GetMemoryMode() {
             return static_cast<smc::MemoryMode>((GetKernelConfiguration() >> 10) & 0x3);
         }
@@ -73,6 +79,24 @@ namespace ams::kern {
         return (GetKernelConfiguration() >> 3) & 1;
     }
 
+    /* Randomness. */
+    void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
+        MESOSPHERE_ABORT_UNLESS(size <= 0x38);
+        smc::GenerateRandomBytes(dst, size);
+    }
+
+    u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
+        /* This is a biased random, but this is okay for now. */
+        /* TODO: unbiased random? */
+        const u64 range_size    = ((max + 1) - min);
+        const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
+        while (true) {
+            if (const u64 rnd = GenerateRandomU64(); rnd < effective_max) {
+                return rnd % effective_max;
+            }
+        }
+    }
+
     void KSystemControl::StopSystem() {
         /* Display a panic screen via exosphere. */
         smc::Panic(0xF00);
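GenerateRandomRange() is rejection sampling: effective_max is the largest multiple of range_size that fits in a u64, and any sample at or above it is discarded so the modulo reduction sees a uniform input. A standalone sketch of the scheme with a stand-in RNG (std::mt19937_64, illustrative only; the committed code returns rnd % effective_max directly, while the textbook form folds into [min, max] as shown here):

    #include <cstdint>
    #include <limits>
    #include <random>

    static std::mt19937_64 g_rng{0xC0FFEE};

    uint64_t RandomRange(uint64_t min, uint64_t max) {
        const uint64_t range_size    = (max + 1) - min;
        const uint64_t effective_max = (std::numeric_limits<uint64_t>::max() / range_size) * range_size;
        while (true) {
            if (const uint64_t rnd = g_rng(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }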
@@ -80,6 +80,18 @@ namespace ams::kern::smc {
            }
        }
 
+        void GenerateRandomBytes(void *dst, size_t size) {
+            /* Call SmcGenerateRandomBytes() */
+            /* TODO: Lock this to ensure only one core calls at once. */
+            SecureMonitorArguments args = { FunctionId_GetConfig, size };
+            MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
+            CallPrivilegedSecureMonitorFunction(args);
+            MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+
+            /* Copy output. */
+            std::memcpy(dst, &args.x[1], size);
+        }
+
         void NORETURN Panic(u32 color) {
             SecureMonitorArguments args = { FunctionId_Panic, color };
             CallPrivilegedSecureMonitorFunction(args);
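The size <= 0x38 bound in KSystemControl::GenerateRandomBytes matches this calling convention: assuming SecureMonitorArguments carries the eight u64 SMC registers x0..x7 with x[0] holding the function id on entry and the result on return, x[1..7] leave 7 * 8 = 0x38 bytes of payload. A sketch of the assumed layout:

    #include <cstdint>

    struct SecureMonitorArguments {  /* Assumed shape, not quoted from the commit.    */
        uint64_t x[8];               /* x0..x7 as exchanged with the secure monitor.  */
    };
    static_assert(sizeof(SecureMonitorArguments) - sizeof(uint64_t) == 0x38,
                  "x[1..7] provide 56 bytes of random output");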
@@ -65,6 +65,7 @@ namespace ams::kern::smc {
 
     /* TODO: Rest of Secure Monitor API. */
     void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
+    void GenerateRandomBytes(void *dst, size_t size);
     void NORETURN Panic(u32 color);
     bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
 
@@ -35,6 +35,7 @@
 #define NORETURN __attribute__((noreturn))
 #define WEAK_SYMBOL __attribute__((weak))
 #define ALWAYS_INLINE inline __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
 
 #define CONST_FOLD(x) (__builtin_constant_p(x) ? (x) : (x))
 
@@ -14,6 +14,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <mesosphere.hpp>
+#include "kern_init_loader_asm.hpp"
+
+/* Necessary for calculating kernelldr size/base for initial identity mapping */
+extern "C" {
+
+    extern const u8 __start__[];
+    extern const u8 __end__[];
+
+}
 
 namespace ams::kern::init::loader {
 
@@ -67,6 +76,26 @@ namespace ams::kern::init::loader {
             }
         }
 
+        void EnsureEntireDataCacheFlushed() {
+            /* Flush shared cache. */
+            cpu::FlushEntireDataCacheShared();
+            cpu::DataSynchronizationBarrier();
+
+            /* Flush local cache. */
+            cpu::FlushEntireDataCacheLocal();
+            cpu::DataSynchronizationBarrier();
+
+            /* Flush shared cache. */
+            cpu::FlushEntireDataCacheShared();
+            cpu::DataSynchronizationBarrier();
+
+            /* Invalidate entire instruction cache. */
+            cpu::InvalidateEntireInstructionCache();
+
+            /* Invalidate entire TLB. */
+            cpu::InvalidateEntireTlb();
+        }
+
         void SetupInitialIdentityMapping(KInitialPageTable &ttbr1_table, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::IPageAllocator &allocator) {
             /* Make a new page table for TTBR0_EL1. */
             KInitialPageTable ttbr0_table(allocator.Allocate());
@@ -77,7 +106,139 @@ namespace ams::kern::init::loader {
 
             /* Map in an RWX identity mapping for ourselves. */
             constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
-            //ttbr0_table.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
+            const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize);
+            const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base;
+            ttbr0_table.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator);
 
             /* Map in the page table region as RW- for ourselves. */
             constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
             ttbr0_table.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator);
 
+            /* Place the L1 table addresses in the relevant system registers. */
+            cpu::SetTtbr0El1(ttbr0_table.GetL1TableAddress());
+            cpu::SetTtbr1El1(ttbr1_table.GetL1TableAddress());
+
+            /* Setup MAIR_EL1, TCR_EL1. */
+            /* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing .*/
+            constexpr u64 MairValue = 0x0000000044FF0400ul;
+            constexpr u64 TcrValue  = 0x00000011B5193519ul;
+            cpu::SetMairEl1(MairValue);
+            cpu::SetTcrEl1(TcrValue);
+
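The still-undocumented MairValue decodes per the architectural MAIR_EL1 byte layout (one memory-attribute encoding per index, attr0 in the low byte); this decoding comes from the ARM architecture manual, not from the commit:

    constexpr u64 MairValue = 0x0000000044FF0400ul;
    static_assert(((MairValue >>  0) & 0xFF) == 0x00); /* attr0: Device-nGnRnE                   */
    static_assert(((MairValue >>  8) & 0xFF) == 0x04); /* attr1: Device-nGnRE                    */
    static_assert(((MairValue >> 16) & 0xFF) == 0xFF); /* attr2: Normal, write-back, RW-allocate */
    static_assert(((MairValue >> 24) & 0xFF) == 0x44); /* attr3: Normal, non-cacheable           */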
+            /* Perform cpu-specific setup. */
+            {
+                SavedRegisterState saved_registers;
+                SaveRegistersToTpidrEl1(&saved_registers);
+                ON_SCOPE_EXIT { VerifyAndClearTpidrEl1(&saved_registers); };
+
+                /* Main ID specific setup. */
+                cpu::MainIdRegisterAccessor midr_el1;
+                if (midr_el1.GetImplementer() == cpu::MainIdRegisterAccessor::Implementer::ArmLimited) {
+                    /* ARM limited specific setup. */
+                    const auto cpu_primary_part = midr_el1.GetPrimaryPartNumber();
+                    const auto cpu_variant      = midr_el1.GetVariant();
+                    const auto cpu_revision     = midr_el1.GetRevision();
+                    if (cpu_primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA57) {
+                        /* Cortex-A57 specific setup. */
+
+                        /* Non-cacheable load forwarding enabled. */
+                        u64 cpuactlr_value = 0x1000000;
+
+                        /* Enable the processor to receive instruction cache and TLB maintenance */
+                        /* operations broadcast from other processors in the cluster; */
+                        /* set the L2 load/store data prefetch distance to 8 requests; */
+                        /* set the L2 instruction fetch prefetch distance to 3 requests. */
+                        u64 cpuectlr_value = 0x1B00000040;
+
+                        /* Disable load-pass DMB on certain hardware variants. */
+                        if (cpu_variant == 0 || (cpu_variant == 1 && cpu_revision <= 1)) {
+                            cpuactlr_value |= 0x800000000000000;
+                        }
+
+                        /* Set actlr and ectlr. */
+                        if (cpu::GetCpuActlrEl1() != cpuactlr_value) {
+                            cpu::SetCpuActlrEl1(cpuactlr_value);
+                        }
+                        if (cpu::GetCpuEctlrEl1() != cpuectlr_value) {
+                            cpu::SetCpuEctlrEl1(cpuectlr_value);
+                        }
+                    } else if (cpu_primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA53) {
+                        /* Cortex-A53 specific setup. */
+
+                        /* Set L1 data prefetch control to allow 5 outstanding prefetches; */
+                        /* enable device split throttle; */
+                        /* set the number of independent data prefetch streams to 2; */
+                        /* disable transient and no-read-allocate hints for loads; */
+                        /* set write streaming no-allocate threshold so the 128th consecutive streaming */
+                        /* cache line does not allocate in the L1 or L2 cache. */
+                        u64 cpuactlr_value = 0x90CA000;
+
+                        /* Enable hardware management of data coherency with other cores in the cluster. */
+                        u64 cpuectlr_value = 0x40;
+
+                        /* If supported, enable data cache clean as data cache clean/invalidate. */
+                        if (cpu_variant != 0 || (cpu_variant == 0 && cpu_revision > 2)) {
+                            cpuactlr_value |= 0x100000000000;
+                        }
+
+                        /* Set actlr and ectlr. */
+                        if (cpu::GetCpuActlrEl1() != cpuactlr_value) {
+                            cpu::SetCpuActlrEl1(cpuactlr_value);
+                        }
+                        if (cpu::GetCpuEctlrEl1() != cpuectlr_value) {
+                            cpu::SetCpuEctlrEl1(cpuectlr_value);
+                        }
+                    }
+                }
+            }
+
+            /* Ensure that the entire cache is flushed. */
+            EnsureEntireDataCacheFlushed();
+
+            /* Setup SCTLR_EL1. */
+            /* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing .*/
+            constexpr u64 SctlrValue = 0x0000000034D5D925ul;
+            cpu::SetSctlrEl1(SctlrValue);
+            cpu::EnsureInstructionConsistency();
+        }
+
+        KVirtualAddress GetRandomKernelBaseAddress(KInitialPageTable &page_table, KPhysicalAddress phys_base_address, size_t kernel_size) {
+            /* Define useful values for random generation. */
+            constexpr uintptr_t KernelBaseAlignment = 0x200000;
+            constexpr uintptr_t KernelBaseRangeMin  = 0xFFFFFF8000000000;
+            constexpr uintptr_t KernelBaseRangeMax  = 0xFFFFFFFFFFE00000;
+            constexpr uintptr_t KernelBaseRangeEnd  = KernelBaseRangeMax - 1;
+            static_assert(util::IsAligned(KernelBaseRangeMin, KernelBaseAlignment));
+            static_assert(util::IsAligned(KernelBaseRangeMax, KernelBaseAlignment));
+            static_assert(KernelBaseRangeMin <= KernelBaseRangeEnd);
+
+            const uintptr_t kernel_offset = GetInteger(phys_base_address) % KernelBaseAlignment;
+
+            /* Repeatedly generate a random virtual address until we get one that's unmapped in the destination page table. */
+            while (true) {
+                const KVirtualAddress random_kaslr_slide  = KSystemControl::GenerateRandomRange(KernelBaseRangeMin, KernelBaseRangeEnd);
+                const KVirtualAddress kernel_region_start = util::AlignDown(GetInteger(random_kaslr_slide), KernelBaseAlignment);
+                const KVirtualAddress kernel_region_end   = util::AlignUp(GetInteger(kernel_region_start) + kernel_offset + kernel_size, KernelBaseAlignment);
+                const size_t          kernel_region_size  = GetInteger(kernel_region_end) - GetInteger(kernel_region_start);
+
+                /* Make sure the region has not overflowed */
+                if (kernel_region_start >= kernel_region_end) {
+                    continue;
+                }
+
+                /* Make sure that the region stays within our intended bounds. */
+                if (kernel_region_end > KernelBaseRangeMax) {
+                    continue;
+                }
+
+                /* Validate we can map the range we've selected. */
+                if (!page_table.IsFree(kernel_region_start, kernel_region_size)) {
+                    continue;
+                }
+
+                /* Our range is valid! */
+                return kernel_region_start + kernel_offset;
+            }
+        }
+
     }
 
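The returned base preserves the kernel's physical offset within a 2MB page (kernel_offset), so the physical and virtual addresses share bits [20:0] and the image remains mappable with 2MB L2 blocks. Worked through with hypothetical numbers:

    /* phys_base_address = 0x80060000                                          */
    /* kernel_offset     = 0x80060000 % 0x200000 = 0x60000                     */
    /* random slide      -> kernel_region_start = 0xFFFFFF8012600000 (aligned) */
    /* returned base     = 0xFFFFFF8012600000 + 0x60000 = 0xFFFFFF8012660000   */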
@@ -104,11 +265,10 @@ namespace ams::kern::init::loader {
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(ro_offset, 0x1000));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(ro_end_offset, 0x1000));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(rw_offset, 0x1000));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(rw_end_offset, 0x1000));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(bss_end_offset, 0x1000));
         const uintptr_t bss_offset = layout->bss_offset;
         const uintptr_t ini_end_offset = layout->ini_end_offset;
-        const uintptr_t dynamic_end_offset = layout->dynamic_end_offset;
         const uintptr_t dynamic_offset = layout->dynamic_offset;
         const uintptr_t init_array_offset = layout->init_array_offset;
         const uintptr_t init_array_end_offset = layout->init_array_end_offset;
 
@@ -140,16 +300,35 @@ namespace ams::kern::init::loader {
         /* Setup initial identity mapping. TTBR1 table passed by reference. */
         SetupInitialIdentityMapping(ttbr1_table, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSize, g_initial_page_allocator);
 
-        /* TODO: Use these. */
-        (void)(bss_offset);
-        (void)(ini_end_offset);
-        (void)(dynamic_end_offset);
-        (void)(init_array_offset);
-        (void)(init_array_end_offset);
+        /* Generate a random slide for the kernel's base address. */
+        const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(ttbr1_table, base_address, bss_end_offset);
+
+        /* Map kernel .text as R-X. */
+        constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
+        ttbr1_table.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator);
 
-        /* TODO */
-        return 0;
+        /* Map kernel .rodata and .rwdata as RW-. */
+        /* Note that we will later reprotect .rodata as R-- */
+        constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
+        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
+        ttbr1_table.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
+        ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
+
+        /* Clear kernel .bss. */
+        std::memset(GetVoidPointer(virtual_base_address + bss_offset), 0, bss_end_offset - rw_end_offset);
+
+        /* Apply relocations to the kernel. */
+        const Elf::Elf64::Dyn *kernel_dynamic = reinterpret_cast<const Elf::Elf64::Dyn *>(GetInteger(virtual_base_address) + dynamic_offset);
+        Elf::Elf64::ApplyRelocations(GetInteger(virtual_base_address), kernel_dynamic);
+
+        /* Reprotect .rodata as R-- */
+        ttbr1_table.Reprotect(virtual_base_address + ro_offset, ro_end_offset - ro_offset, KernelRwDataAttribute, KernelRoDataAttribute);
+
+        /* Call the kernel's init array functions. */
+        Elf::Elf64::CallInitArrayFuncs(GetInteger(virtual_base_address) + init_array_offset, GetInteger(virtual_base_address) + init_array_end_offset);
+
+        /* Return the difference between the random virtual base and the physical base. */
+        return GetInteger(virtual_base_address) - base_address;
     }
 
     void Finalize() {
mesosphere/kernel_ldr/source/kern_init_loader_asm.hpp (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere.hpp>
+
+namespace ams::kern::init::loader {
+
+    struct SavedRegisterState {
+        u64 x[(30 - 19) + 1];
+        u64 sp;
+        u64 xzr;
+    };
+    static_assert(sizeof(SavedRegisterState) == 0x70);
+
+    int SaveRegistersToTpidrEl1(void *tpidr_el1);
+    void VerifyAndClearTpidrEl1(void *tpidr_el1);
+
+}
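The static_assert arithmetic checks out: x19..x30 is (30 - 19) + 1 = 12 registers, plus sp and xzr gives 14 u64s, and 14 * 8 = 0x70 bytes, exactly the seven stp pairs stored by the assembly below:

    static_assert((12 + 2) * sizeof(u64) == 0x70); /* 12 callee-saved GPRs + sp + xzr. */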
mesosphere/kernel_ldr/source/kern_init_loader_asm.s (new file, 48 lines)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+.section .text._ZN3ams4kern4init6loader23SaveRegistersToTpidrEl1EPv, "ax", %progbits
+.global _ZN3ams4kern4init6loader23SaveRegistersToTpidrEl1EPv
+_ZN3ams4kern4init6loader23SaveRegistersToTpidrEl1EPv:
+    /* Set TPIDR_EL1 to the input register. */
+    msr tpidr_el1, x0
+
+    /* Save registers to the region specified. */
+    mov x1, sp
+    stp x19, x20, [x0], #0x10
+    stp x21, x22, [x0], #0x10
+    stp x23, x24, [x0], #0x10
+    stp x25, x26, [x0], #0x10
+    stp x27, x28, [x0], #0x10
+    stp x29, x30, [x0], #0x10
+    stp x1, xzr, [x0], #0x10
+    mov x0, #0x0
+    ret
+
+.section .text._ZN3ams4kern4init6loader22VerifyAndClearTpidrEl1EPv, "ax", %progbits
+.global _ZN3ams4kern4init6loader22VerifyAndClearTpidrEl1EPv
+_ZN3ams4kern4init6loader22VerifyAndClearTpidrEl1EPv:
+    /* Get system register area from thread-specific processor id */
+    mrs x1, tpidr_el1
+
+    /* We require here that the region registers are saved is same as input. */
+    cmp x0, x1
+invalid_tpidr:
+    b.ne invalid_tpidr
+
+    /* Clear TPIDR_EL1. */
+    msr tpidr_el1, xzr
+    ret
@@ -80,7 +80,7 @@ _start:
 
     /* Return to the newly-relocated kernel. */
     ldr x1, [sp, #0x18] /* Return address to Kernel */
-    ldr x2, [sp, #0x00] /* Relocated kernel base address. */
+    ldr x2, [sp, #0x00] /* Relocated kernel base address diff. */
    add x1, x2, x1
    br x1
 
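This pairs with the loader's new return value: Main() now returns the KASLR slide (virtual base minus physical base), and this stub adds the slide to the kernel's saved return address before branching. Hypothetical numbers, illustrative only:

    const uint64_t phys_return = 0x800600B0ull;                          /* Saved physical return address. */
    const uint64_t slide       = 0xFFFFFF8012660000ull - 0x80060000ull;  /* What kernelldr returns in x0.  */
    const uint64_t virt_return = phys_return + slide;                    /* == 0xFFFFFF80126600B0          */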