diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp
index f155cebc1..135b6b5a6 100644
--- a/libraries/libmesosphere/include/mesosphere.hpp
+++ b/libraries/libmesosphere/include/mesosphere.hpp
@@ -36,3 +36,6 @@
 /* Core functionality. */
 #include "mesosphere/kern_select_interrupts.hpp"
 #include "mesosphere/kern_select_k_system_control.hpp"
+
+/* Supervisor Calls. */
+#include "mesosphere/kern_svc.hpp"
diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp
new file mode 100644
index 000000000..6eaa75f9a
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc/kern_svc_k_user_pointer.hpp"
+#include "svc/kern_svc_prototypes.hpp"
+#include "svc/kern_svc_tables.hpp"
diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp
new file mode 100644
index 000000000..368ff39dd
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::kern::svc {
+
+    /* TODO: Actually implement this type. */
+    template<typename T>
+    struct KUserPointer : impl::KUserPointerTag {
+        public:
+            static_assert(std::is_pointer<T>::value);
+            static constexpr bool IsInput = std::is_const<typename std::remove_pointer<T>::type>::value;
+        private:
+            T pointer;
+    };
+
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp
new file mode 100644
index 000000000..3412373ba
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
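The KUserPointer skeleton above leans on one trait: whether the wrapped pointer is an input or an output falls out of the pointee's const-qualification. A standalone sketch of that detection (the tag type here is local to the example, standing in for impl::KUserPointerTag):

```cpp
#include <type_traits>

/* Tag base so other templates can detect "some KUserPointer" without
 * knowing T; this name stands in for impl::KUserPointerTag. */
struct KUserPointerTag {};

template<typename T>
struct KUserPointer : KUserPointerTag {
    static_assert(std::is_pointer<T>::value);
    /* A pointer-to-const pointee can only be read, so it marks an input. */
    static constexpr bool IsInput = std::is_const<typename std::remove_pointer<T>::type>::value;
};

/* const char * : userspace data flows into the kernel. */
static_assert(KUserPointer<const char *>::IsInput);
/* char *       : the kernel writes back out through it. */
static_assert(!KUserPointer<char *>::IsInput);

int main() { return 0; }
```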
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include "kern_svc_k_user_pointer.hpp" + +namespace ams::kern::svc { + + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \ + RETURN_TYPE NAME##64(__VA_ARGS__); + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32(ID, RETURN_TYPE, NAME, ...) \ + RETURN_TYPE NAME##64From32(__VA_ARGS__); + + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64, lp64) + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32, ilp32) + + /* TODO: Support _32 ABI */ + + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64 + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32 + + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp new file mode 100644 index 000000000..42b4a62bd --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern::svc { + + static constexpr size_t NumSupervisorCalls = 0x80; + using SvcTableEntry = void (*)(); + + /* TODO: 32-bit ABI */ + + extern const std::array SvcTable64From32; + extern const std::array SvcTable64; + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp new file mode 100644 index 000000000..a2fd6a28d --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +namespace ams::kern::svc { + + namespace { + + #define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \ + class NAME { \ + private: \ + using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \ + public: \ + static NOINLINE void Call64() { return Impl::Call64(); } \ + static NOINLINE void Call64From32() { return Impl::Call64From32(); } \ + }; + + + + /* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. 
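The prototype macros above are the classic X-macro pattern: AMS_SVC_FOREACH_KERN_DEFINITION expands a caller-supplied handler once per SVC definition, so a single list drives prototypes, wrapper structs, and dispatch tables alike. A toy version with an invented two-entry list (the real list lives in libvapours' SVC definitions):

```cpp
#include <cstdint>

/* Toy FOREACH in the style of AMS_SVC_FOREACH_KERN_DEFINITION: each entry is
 * (ID, RETURN_TYPE, NAME, ARGS...), and the handler decides what to emit.
 * These two entries are made up for illustration. */
#define FOREACH_SVC(HANDLER)                      \
    HANDLER(0x07, void, ExitProcess)              \
    HANDLER(0x0B, void, SleepThread, int64_t ns)

/* Mirrors AMS_KERN_SVC_DECLARE_PROTOTYPE_64: declare a NAME##64 prototype. */
#define DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \
    RETURN_TYPE NAME##64(__VA_ARGS__);

FOREACH_SVC(DECLARE_PROTOTYPE_64)
/* Expands to:
 *   void ExitProcess64();
 *   void SleepThread64(int64_t ns);
 */
#undef DECLARE_PROTOTYPE_64
#undef FOREACH_SVC

int main() { return 0; }
```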
*/ + #pragma GCC push_options + #pragma GCC optimize ("omit-frame-pointer") + + AMS_SVC_FOREACH_KERN_DEFINITION(DECLARE_SVC_STRUCT, _) + + #pragma GCC pop_options + + } + + /* TODO: 32-bit ABI */ + const std::array SvcTable64From32 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + table[ID] = NAME::Call64From32; + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + return table; + }(); + + const std::array SvcTable64 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + table[ID] = NAME::Call64; + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + return table; + }(); + +} diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp new file mode 100644 index 000000000..6a13d93e0 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + #define SVC_CODEGEN_FOR_I_FROM_0_TO_64(HANDLER, ...) 
\ + HANDLER( 0, ## __VA_ARGS__); HANDLER( 1, ## __VA_ARGS__); HANDLER( 2, ## __VA_ARGS__); HANDLER( 3, ## __VA_ARGS__); \ + HANDLER( 4, ## __VA_ARGS__); HANDLER( 5, ## __VA_ARGS__); HANDLER( 6, ## __VA_ARGS__); HANDLER( 7, ## __VA_ARGS__); \ + HANDLER( 8, ## __VA_ARGS__); HANDLER( 9, ## __VA_ARGS__); HANDLER(10, ## __VA_ARGS__); HANDLER(11, ## __VA_ARGS__); \ + HANDLER(12, ## __VA_ARGS__); HANDLER(13, ## __VA_ARGS__); HANDLER(14, ## __VA_ARGS__); HANDLER(15, ## __VA_ARGS__); \ + HANDLER(16, ## __VA_ARGS__); HANDLER(17, ## __VA_ARGS__); HANDLER(18, ## __VA_ARGS__); HANDLER(19, ## __VA_ARGS__); \ + HANDLER(20, ## __VA_ARGS__); HANDLER(21, ## __VA_ARGS__); HANDLER(22, ## __VA_ARGS__); HANDLER(23, ## __VA_ARGS__); \ + HANDLER(24, ## __VA_ARGS__); HANDLER(25, ## __VA_ARGS__); HANDLER(26, ## __VA_ARGS__); HANDLER(27, ## __VA_ARGS__); \ + HANDLER(28, ## __VA_ARGS__); HANDLER(29, ## __VA_ARGS__); HANDLER(30, ## __VA_ARGS__); HANDLER(31, ## __VA_ARGS__); \ + HANDLER(32, ## __VA_ARGS__); HANDLER(33, ## __VA_ARGS__); HANDLER(34, ## __VA_ARGS__); HANDLER(35, ## __VA_ARGS__); \ + HANDLER(36, ## __VA_ARGS__); HANDLER(37, ## __VA_ARGS__); HANDLER(38, ## __VA_ARGS__); HANDLER(39, ## __VA_ARGS__); \ + HANDLER(40, ## __VA_ARGS__); HANDLER(41, ## __VA_ARGS__); HANDLER(42, ## __VA_ARGS__); HANDLER(43, ## __VA_ARGS__); \ + HANDLER(44, ## __VA_ARGS__); HANDLER(45, ## __VA_ARGS__); HANDLER(46, ## __VA_ARGS__); HANDLER(47, ## __VA_ARGS__); \ + HANDLER(48, ## __VA_ARGS__); HANDLER(49, ## __VA_ARGS__); HANDLER(50, ## __VA_ARGS__); HANDLER(51, ## __VA_ARGS__); \ + HANDLER(52, ## __VA_ARGS__); HANDLER(53, ## __VA_ARGS__); HANDLER(54, ## __VA_ARGS__); HANDLER(55, ## __VA_ARGS__); \ + HANDLER(56, ## __VA_ARGS__); HANDLER(57, ## __VA_ARGS__); HANDLER(58, ## __VA_ARGS__); HANDLER(59, ## __VA_ARGS__); \ + HANDLER(60, ## __VA_ARGS__); HANDLER(61, ## __VA_ARGS__); HANDLER(62, ## __VA_ARGS__); HANDLER(63, ## __VA_ARGS__); + + + class Aarch64CodeGenerator { + private: + struct RegisterPair { + size_t First; + size_t Second; + }; + + template + struct RegisterPairHelper; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1 + RegisterPairHelper::PairCount; + static constexpr std::array Pairs = [] { + std::array pairs = {}; + pairs[0] = RegisterPair{First, Second}; + if constexpr (RegisterPairHelper::PairCount) { + for (size_t i = 0; i < RegisterPairHelper::PairCount; i++) { + pairs[1+i] = RegisterPairHelper::Pairs[i]; + } + } + return pairs; + }(); + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1; + static constexpr std::array Pairs = { RegisterPair{First, Second} }; + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 0; + static constexpr std::array Pairs = {}; + }; + + template + static ALWAYS_INLINE void ClearRegister() { + __asm__ __volatile__("mov x%c[r], xzr" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegister() { + __asm__ __volatile__("str x%c[r], [sp, -16]!" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegister() { + __asm__ __volatile__("ldr x%c[r], [sp], 16" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegisterPair() { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, -16]!" 
:: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegisterPair() { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp], 16" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr ((63 - n) < Pairs.size()) { SaveRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + + SaveRegister(); + } else { + /* Only one register. */ + SaveRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + template + static ALWAYS_INLINE void RestoreRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr (n < Pairs.size()) { RestoreRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + RestoreRegister(); + + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else { + /* Only one register. */ + RestoreRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + public: + template + static ALWAYS_INLINE void SaveRegisters() { + if constexpr (sizeof...(Registers) > 0) { + SaveRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void RestoreRegisters() { + if constexpr (sizeof...(Registers) > 0) { + RestoreRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void ClearRegisters() { + static_assert(sizeof...(Registers) <= 8); + (ClearRegister(), ...); + } + + template + static ALWAYS_INLINE void AllocateStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("sub sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void FreeStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("add sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void MoveRegister() { + __asm__ __volatile__("mov x%c[dst], x%c[src]" :: [dst]"i"(Dst), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldr w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldr x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void LoadPairFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StoreToStack() { + if constexpr 
(Size == 4) { + __asm__ __volatile__("str w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("str x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StorePairToStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("stp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void Pack() { + __asm__ __volatile__("orr x%c[dst], x%c[low], x%c[high], lsl #32" :: [dst]"i"(Dst), [low]"i"(Low), [high]"i"(High) : "memory"); + } + + template + static ALWAYS_INLINE void Unpack() { + if constexpr (Src != Low) { + MoveRegister(); + } + + __asm__ __volatile__("lsr x%c[high], x%c[src], #32" :: [high]"i"(High), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadStackAddress() { + if constexpr (Offset > 0) { + __asm__ __volatile__("add x%c[dst], sp, %c[offset]" :: [dst]"i"(Dst), [offset]"i"(Offset) : "memory"); + } else if constexpr (Offset == 0) { + __asm__ __volatile__("mov x%c[dst], sp" :: [dst]"i"(Dst) : "memory"); + } + } + }; + + class Aarch32CodeGenerator { + /* TODO */ + }; + + template + static ALWAYS_INLINE void GenerateCodeForMetaCode(MetaCodeHolder) { + constexpr auto MetaCode = UNWRAP_TEMPLATE_CONSTANT(MetaCodeHolder); + constexpr size_t NumOperations = MetaCode.GetNumOperations(); + static_assert(NumOperations <= 64); + #define SVC_CODEGEN_HANDLER(n) do { if constexpr (n < NumOperations) { constexpr auto Operation = MetaCode.GetOperation(n); GenerateCodeForOperation(WRAP_TEMPLATE_CONSTANT(Operation)); } } while (0) + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + #undef SVC_CODEGEN_HANDLER + } + + #undef SVC_CODEGEN_FOR_I_FROM_0_TO_64 + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp new file mode 100644 index 000000000..c87b4e7c3 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
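Pack and Unpack above implement the 32-bit-ABI calling trick: two w registers are fused into one x register with `orr xD, xL, xH, lsl #32` and split back apart with a 32-bit shift. The same transformation in plain C++, assuming (as the `orr` form does) that the low half's upper 32 bits are clear:

```cpp
#include <cstdint>

/* What the generated "orr xD, xL, xH, lsl #32" / "lsr xH, xS, #32" pair
 * does: two 32-bit SVC arguments travel through one 64-bit kernel register
 * and are split again on return. */
constexpr uint64_t Pack(uint32_t low, uint32_t high) {
    return static_cast<uint64_t>(low) | (static_cast<uint64_t>(high) << 32);
}

constexpr uint32_t UnpackLow(uint64_t v)  { return static_cast<uint32_t>(v); }
constexpr uint32_t UnpackHigh(uint64_t v) { return static_cast<uint32_t>(v >> 32); }

static_assert(Pack(0xAABBCCDDu, 0x11223344u) == 0x11223344AABBCCDDull);
static_assert(UnpackLow(Pack(1, 2)) == 1);
static_assert(UnpackHigh(Pack(1, 2)) == 2);

int main() { return 0; }
```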
+ */ +#pragma once + +namespace ams::svc::codegen::impl { + + template + constexpr inline bool IsIntegral = std::is_integral::value; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Address> = true; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Size> = true; + + template + constexpr inline bool IsKUserPointer = std::is_base_of::value; + + template + constexpr inline bool IsIntegralOrUserPointer = IsIntegral || IsUserPointer || IsKUserPointer; + + template + constexpr std::index_sequence IndexSequenceCat(std::index_sequence, std::index_sequence) { + return std::index_sequence{}; + } + + template + constexpr inline std::array ConvertToArray(std::index_sequence) { + return std::array{ Is... }; + } + + template + class FunctionTraits { + private: + template + static R GetReturnTypeImpl(R(*)(A...)); + + template + static std::tuple GetArgsImpl(R(*)(A...)); + public: + using ReturnType = decltype(GetReturnTypeImpl(Function)); + using ArgsType = decltype(GetArgsImpl(Function)); + }; + + enum class CodeGenerationKind { + SvcInvocationToKernelProcedure, + PrepareForKernelProcedureToSvcInvocation, + KernelProcedureToSvcInvocation, + Invalid, + }; + + enum class ArgumentType { + In, + Out, + InUserPointer, + OutUserPointer, + Invalid, + }; + + template + constexpr inline ArgumentType GetArgumentType = [] { + static_assert(!std::is_reference::value, "SVC ABI: Reference types not allowed."); + static_assert(sizeof(T) <= sizeof(uint64_t), "SVC ABI: Type too large"); + if constexpr (std::is_pointer::value) { + static_assert(!std::is_const::type>::value, "SVC ABI: Output (T*) must not be const"); + return ArgumentType::Out; + } else if constexpr (IsUserPointer || IsKUserPointer) { + if constexpr (T::IsInput) { + return ArgumentType::InUserPointer; + } else { + return ArgumentType::OutUserPointer; + } + } else { + return ArgumentType::In; + } + }(); + + template + struct AbiType { + static constexpr size_t RegisterSize = RS; + static constexpr size_t RegisterCount = RC; + static constexpr size_t ArgumentRegisterCount = ARC; + static constexpr size_t PointerSize = PC; + + template + static constexpr size_t GetSize() { + if constexpr (std::is_same::value || std::is_same::value || IsUserPointer || IsKUserPointer) { + return PointerSize; + } else if constexpr(std::is_pointer::value) { + /* Out parameter. */ + return GetSize::type>(); + } else if constexpr (std::is_same::value) { + return 0; + } else { + return sizeof(T); + } + } + + template + static constexpr inline size_t Size = GetSize(); + }; + + using Aarch64Lp64Abi = AbiType<8, 8, 8, 8>; + using Aarch64Ilp32Abi = AbiType<8, 8, 8, 4>; + using Aarch32Ilp32Abi = AbiType<4, 4, 4, 4>; + + using Aarch64SvcInvokeAbi = AbiType<8, 8, 8, 8>; + using Aarch32SvcInvokeAbi = AbiType<4, 8, 4, 4>; + + struct Abi { + size_t register_size; + size_t register_count; + size_t pointer_size; + + template + static constexpr Abi Convert() { return { AbiType::RegisterSize, AbiType::RegisterCount, AbiType::PointerSize }; } + }; + + template + constexpr inline bool IsPassedByPointer = [] { + if (GetArgumentType != ArgumentType::In) { + return true; + } + + return (!IsIntegral && AbiType::template Size > AbiType::RegisterSize); + }(); + + template + class RegisterAllocator { + private: + std::array map; + public: + constexpr explicit RegisterAllocator() : map() { /* ... 
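FunctionTraits above recovers a function's signature purely at compile time by pattern-matching a function-pointer template parameter against `R(*)(A...)`; the helper declarations are never called, only named in unevaluated decltype contexts. A self-contained equivalent:

```cpp
#include <tuple>
#include <type_traits>

/* Deduce R and A... by overload resolution against R(*)(A...). The helpers
 * need no definitions because they are only used inside decltype. */
template<auto Function>
class FunctionTraits {
    private:
        template<typename R, typename... A>
        static R GetReturnTypeImpl(R(*)(A...));

        template<typename R, typename... A>
        static std::tuple<A...> GetArgsImpl(R(*)(A...));
    public:
        using ReturnType = decltype(GetReturnTypeImpl(Function));
        using ArgsType   = decltype(GetArgsImpl(Function));
};

long Example(int a, char *b) { return a + (b != nullptr); }

static_assert(std::is_same<FunctionTraits<Example>::ReturnType, long>::value);
static_assert(std::is_same<FunctionTraits<Example>::ArgsType, std::tuple<int, char *>>::value);

int main() { return 0; }
```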
*/ } + + constexpr bool IsAllocated(size_t i) const { return this->map[i]; } + constexpr bool IsFree(size_t i) const { return !this->IsAllocated(i); } + + constexpr void Allocate(size_t i) { + if (this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = true; + } + + constexpr bool TryAllocate(size_t i) { + if (this->IsAllocated(i)) { + return false; + } + + this->map[i] = true; + return true; + } + + constexpr size_t AllocateFirstFree() { + for (size_t i = 0; i < N; i++) { + if (!this->IsAllocated(i)) { + this->map[i] = true; + return i; + } + } + + std::abort(); + } + + constexpr void Free(size_t i) { + if (!this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = false; + } + + constexpr size_t GetRegisterCount() const { + return N; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..3fffe60fa --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
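RegisterAllocator is fully constexpr, which turns allocation bugs into build failures: std::abort() is not a constant expression, so a double-allocate or double-free during compile-time evaluation refuses to compile rather than failing at runtime. A reduced copy demonstrating that usage:

```cpp
#include <array>
#include <cstddef>
#include <cstdlib>

/* Reduced RegisterAllocator: a constexpr bitmap over N registers. */
template<size_t N>
class RegisterAllocator {
    private:
        std::array<bool, N> map;
    public:
        constexpr explicit RegisterAllocator() : map() { /* ... */ }

        constexpr void Allocate(size_t i) {
            if (this->map[i]) { std::abort(); } /* Compile error if constant-evaluated. */
            this->map[i] = true;
        }

        constexpr size_t AllocateFirstFree() {
            for (size_t i = 0; i < N; i++) {
                if (!this->map[i]) { this->map[i] = true; return i; }
            }
            std::abort();
        }

        constexpr void Free(size_t i) {
            if (!this->map[i]) { std::abort(); }
            this->map[i] = false;
        }
};

/* x0 and x1 hold SVC arguments, so the first free scratch register is x2. */
constexpr size_t FirstFree = [] {
    RegisterAllocator<8> allocator;
    allocator.Allocate(0);
    allocator.Allocate(1);
    return allocator.AllocateFirstFree();
}();
static_assert(FirstFree == 2);

int main() { return 0; }
```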
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" +#include "svc_codegen_impl_layout_conversion.hpp" +#include "svc_codegen_impl_code_generator.hpp" + +namespace ams::svc::codegen::impl { + + template + class KernelSvcWrapperHelperImpl; + + template + class KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, ReturnType, std::tuple> { + private: + static constexpr bool TryToPerformCoalescingOptimizations = true; + + template + static constexpr void CoalesceOperations(MetaCodeGenerator &out_mcg, const std::array stack_modified, size_t stack_top) { + enum class State { WaitingForRegister, ParsingRegister, ParsedRegister, EmittingCode }; + State cur_state = State::WaitingForRegister; + size_t num_regs = 0; + size_t registers[2] = { InvalidRegisterId, InvalidRegisterId }; + size_t widths[2] = {}; + size_t index = 0; + size_t store_base = 0; + while (index < stack_top) { + if (cur_state == State::WaitingForRegister) { + while (stack_modified[index] == InvalidRegisterId && index < stack_top) { + index++; + } + cur_state = State::ParsingRegister; + } else if (cur_state == State::ParsingRegister) { + const size_t start_index = index; + if (num_regs == 0) { + store_base = start_index; + } + const size_t reg = stack_modified[index]; + registers[num_regs] = reg; + while (index < stack_top && index < start_index + KernelAbiType::RegisterSize && stack_modified[index] == reg) { + widths[num_regs]++; + index++; + } + num_regs++; + cur_state = State::ParsedRegister; + } else if (cur_state == State::ParsedRegister) { + if (num_regs == 2 || stack_modified[index] == InvalidRegisterId) { + cur_state = State::EmittingCode; + } else { + cur_state = State::ParsingRegister; + } + } else if (cur_state == State::EmittingCode) { + /* Emit an operation! */ + MetaCode::Operation st_op = {}; + + if (num_regs == 2) { + if (registers[0] == registers[1]) { + std::abort(); + } + if (widths[0] == widths[1]) { + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + } else { + std::abort(); + } + } else if (num_regs == 1) { + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[0]; + } else { + std::abort(); + } + + out_mcg.AddOperationDirectly(st_op); + + /* Go back to beginning of parse. */ + for (size_t i = 0; i < num_regs; i++) { + registers[i] = InvalidRegisterId; + widths[i] = 0; + } + num_regs = 0; + cur_state = State::WaitingForRegister; + } else { + std::abort(); + } + } + + if (cur_state == State::ParsedRegister) { + /* Emit an operation! 
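The state machine above walks a byte-granularity map of which register wrote each stack byte, grouping runs so that two equal-width neighbours become a single paired store. A simplified runtime rendition of that scan, printing the operations it would emit instead of building MetaCode (the real pass aborts on cases this sketch just handles with single stores):

```cpp
#include <cstddef>
#include <cstdio>

/* InvalidReg plays the role of InvalidRegisterId in the diff. */
constexpr size_t InvalidReg = ~static_cast<size_t>(0);

void Coalesce(const size_t *stack_modified, size_t stack_top, size_t register_size) {
    size_t index = 0;
    while (index < stack_top) {
        /* Skip untouched bytes. */
        while (index < stack_top && stack_modified[index] == InvalidReg) { index++; }
        if (index >= stack_top) { break; }

        /* Parse up to two register runs. */
        size_t regs[2] = { InvalidReg, InvalidReg };
        size_t widths[2] = {};
        size_t num_regs = 0;
        const size_t base = index;
        while (num_regs < 2 && index < stack_top && stack_modified[index] != InvalidReg) {
            const size_t reg = stack_modified[index];
            regs[num_regs] = reg;
            while (index < stack_top && stack_modified[index] == reg && widths[num_regs] < register_size) {
                widths[num_regs]++;
                index++;
            }
            num_regs++;
        }

        if (num_regs == 2 && regs[0] != regs[1] && widths[0] == widths[1]) {
            std::printf("stp r%zu, r%zu -> [sp + %zu]\n", regs[0], regs[1], base);
        } else {
            std::printf("str r%zu -> [sp + %zu]\n", regs[0], base);
            if (num_regs == 2) { std::printf("str r%zu -> [sp + %zu]\n", regs[1], base + widths[0]); }
        }
    }
}

int main() {
    /* x8 writes 8 bytes at sp+0, x9 writes 8 bytes at sp+8: one stp. */
    size_t map[16] = { 8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9 };
    Coalesce(map, 16, 8);
    return 0;
}
```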
*/ + if (num_regs == 2 && widths[0] == widths[1]) { + MetaCode::Operation st_op = {}; + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + out_mcg.AddOperationDirectly(st_op); + } else { + for (size_t i = 0; i < num_regs; i++) { + MetaCode::Operation st_op = {}; + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[i]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[i]; + + store_base += widths[i]; + out_mcg.AddOperationDirectly(st_op); + } + } + } + } + + /* Basic optimization of store coalescing. */ + template + static constexpr bool TryPrepareForKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) { + /* For debugging, allow ourselves to disable these optimizations. */ + if constexpr (!TryToPerformCoalescingOptimizations) { + return false; + } + + /* Generate expected code. */ + MetaCodeGenerator mcg; + RegisterAllocator allocator = out_allocator; + (Conversion::template GenerateCode(mcg, allocator), ...); + MetaCode mc = mcg.GetMetaCode(); + + /* This is a naive optimization pass. */ + /* We want to reorder code of the form: */ + /* - Store to Stack sequence 0... */ + /* - Load Stack Address 0 */ + /* - Store to Stack 1... */ + /* - Load Stack Address 1 */ + /* Into the form: */ + /* - Store to stack Sequence 0 + 1... */ + /* - Load Stack Address 0 + 1... */ + /* But only if they are semantically equivalent. */ + + /* We'll do a simple, naive pass to check if any registers are stored to stack that are modified. */ + /* This shouldn't happen in any cases we care about, so we can probably get away with it. */ + /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */ + /* However, this will be more work, and if it's not necessary it can be put off until it is. */ + constexpr size_t MaxStackIndex = 0x100; + constexpr size_t InvalidRegisterId = N; + bool register_modified[N] = {}; + std::array stack_address_loaded = {}; + for (size_t i = 0; i < N; i++) { stack_address_loaded[i] = MaxStackIndex; } + std::array stack_modified = {}; + for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; } + size_t stack_top = 0; + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind == MetaCode::OperationKind::StoreToStack) { + if (register_modified[mco.parameters[0]]) { + return false; + } + const size_t offset = mco.parameters[1]; + const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2]; + for (size_t j = 0; j < width; j++) { + const size_t index = offset + j; + if (index >= MaxStackIndex) { + std::abort(); + } + if (stack_modified[index] != InvalidRegisterId) { + return false; + } + stack_modified[index] = mco.parameters[0]; + stack_top = std::max(index + 1, stack_top); + } + } else if (mco.kind == MetaCode::OperationKind::LoadStackAddress) { + if (stack_address_loaded[mco.parameters[0]] != MaxStackIndex) { + return false; + } + if (register_modified[mco.parameters[0]]) { + return false; + } + if (mco.parameters[1] >= MaxStackIndex) { + std::abort(); + } + stack_address_loaded[mco.parameters[0]] = mco.parameters[1]; + register_modified[mco.parameters[0]] = true; + } else { + /* TODO: Better operation reasoning process. */ + return false; + } + } + + /* Looks like we can reorder! 
*/ + /* Okay, let's do this the naive way, too. */ + constexpr auto PairKind = MetaCode::OperationKind::StorePairToStack; + constexpr auto SingleKind = MetaCode::OperationKind::StoreToStack; + CoalesceOperations(out_mcg, stack_modified, stack_top); + for (size_t i = 0; i < N; i++) { + if (stack_address_loaded[i] != MaxStackIndex) { + MetaCode::Operation load_op = {}; + load_op.kind = MetaCode::OperationKind::LoadStackAddress; + load_op.num_parameters = 2; + load_op.parameters[0] = i; + load_op.parameters[1] = stack_address_loaded[i]; + out_mcg.AddOperationDirectly(load_op); + } + } + + /* Ensure the out allocator state is correct. */ + out_allocator = allocator; + + return true; + } + + /* Basic optimization of load coalescing. */ + template + static constexpr bool TryKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) { + /* For debugging, allow ourselves to disable these optimizations. */ + if constexpr (!TryToPerformCoalescingOptimizations) { + return false; + } + + /* Generate expected code. */ + MetaCodeGenerator mcg; + RegisterAllocator allocator = out_allocator; + (Conversion::template GenerateCode(mcg, allocator), ...); + MetaCode mc = mcg.GetMetaCode(); + + /* This is a naive optimization pass. */ + /* We want to coalesce all sequential stack loads, if possible. */ + /* But only if they are semantically equivalent. */ + + /* We'll do a simple, naive pass to check if any registers are used after being loaded from stack that. */ + /* This shouldn't happen in any cases we care about, so we can probably get away with it. */ + /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */ + /* However, this will be more work, and if it's not necessary it can be put off until it is. */ + constexpr size_t MaxStackIndex = 0x100; + constexpr size_t InvalidRegisterId = N; + bool register_modified[N] = {}; + std::array stack_offset_loaded = {}; + for (size_t i = 0; i < N; i++) { stack_offset_loaded[i] = MaxStackIndex; } + std::array stack_modified = {}; + for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; } + size_t stack_top = 0; + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind == MetaCode::OperationKind::Unpack) { + if (register_modified[mco.parameters[0]] || register_modified[mco.parameters[1]] || register_modified[mco.parameters[2]]) { + return false; + } + register_modified[mco.parameters[0]] = true; + register_modified[mco.parameters[1]] = true; + } else if (mco.kind == MetaCode::OperationKind::LoadFromStack) { + if (stack_offset_loaded[mco.parameters[0]] != MaxStackIndex) { + return false; + } + if (register_modified[mco.parameters[0]] != false) { + return false; + } + if (mco.parameters[1] >= MaxStackIndex) { + std::abort(); + } + stack_offset_loaded[mco.parameters[0]] = mco.parameters[1]; + register_modified[mco.parameters[0]] = true; + + const size_t offset = mco.parameters[1]; + const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2]; + for (size_t j = 0; j < width; j++) { + const size_t index = offset + j; + if (index >= MaxStackIndex) { + std::abort(); + } + if (stack_modified[index] != InvalidRegisterId) { + return false; + } + stack_modified[index] = mco.parameters[0]; + stack_top = std::max(index + 1, stack_top); + } + } else { + /* TODO: Better operation reasoning process. 
*/ + return false; + } + } + + /* Any operations that don't load from stack, we can just re-add. */ + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind != MetaCode::OperationKind::LoadFromStack) { + out_mcg.AddOperationDirectly(mco); + } + } + constexpr auto PairKind = MetaCode::OperationKind::LoadPairFromStack; + constexpr auto SingleKind = MetaCode::OperationKind::LoadFromStack; + CoalesceOperations(out_mcg, stack_modified, stack_top); + + /* Ensure the out allocator state is correct. */ + out_allocator = allocator; + + return true; + } + + template + struct TypeIndexFilter { + template + static constexpr auto GetFilteredTupleImpl(UseArrayHolder, std::tuple, std::index_sequence) { + constexpr auto UseArray = UNWRAP_TEMPLATE_CONSTANT(UseArrayHolder); + static_assert(sizeof...(TailType) == sizeof...(TailIndex)); + static_assert(HeadIndex <= UseArray.size()); + + if constexpr (sizeof...(TailType) == 0) { + if constexpr (!UseArray[HeadIndex]) { + return std::tuple{}; + } else { + return std::tuple<>{}; + } + } else { + auto tail_tuple = GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::index_sequence{}); + if constexpr (!UseArray[HeadIndex]) { + return std::tuple_cat(std::tuple{}, tail_tuple); + } else { + return std::tuple_cat(std::tuple<>{}, tail_tuple); + } + } + } + + template + static constexpr auto GetFilteredTuple(UseArrayHolder) { + return GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::make_index_sequence()); + } + }; + + template + static constexpr auto GetModifiedOperations(AllocatorHolder, std::tuple ops) { + constexpr size_t ModifyRegister = [] { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + return allocator.AllocateFirstFree(); + }(); + + using ModifiedFirstOperation = typename FirstOperation::template ModifiedType; + using NewMoveOperation = typename LayoutConversionBase::template OperationMove; + return std::tuple{}; + } + + template + static constexpr auto GenerateBeforeOperations(MetaCodeGenerator &mcg, AllocatorHolder, std::tuple ops) -> RegisterAllocator { + constexpr size_t NumOperations = 1 + sizeof...(OtherOperations); + using OperationsTuple = decltype(ops); + using FilterHelper = TypeIndexFilter; + + constexpr auto ProcessOperation = [](MetaCodeGenerator &pr_mcg, auto &allocator, Operation) { + if (Conversion::template CanGenerateCode(allocator)) { + Conversion::template GenerateCode(pr_mcg, allocator); + return true; + } + return false; + }; + + constexpr auto ProcessResults = [ProcessOperation](std::tuple) { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + MetaCodeGenerator pr_mcg; + auto use_array = std::array{ ProcessOperation(pr_mcg, allocator, Operations{})... 
}; + return std::make_tuple(use_array, allocator, pr_mcg); + }(OperationsTuple{}); + + constexpr auto CanGenerate = std::get<0>(ProcessResults); + constexpr auto AfterAllocator = std::get<1>(ProcessResults); + constexpr auto GeneratedCode = std::get<2>(ProcessResults).GetMetaCode(); + + for (size_t i = 0; i < GeneratedCode.GetNumOperations(); i++) { + mcg.AddOperationDirectly(GeneratedCode.GetOperation(i)); + } + + constexpr auto FilteredOperations = FilterHelper::template GetFilteredTuple(WRAP_TEMPLATE_CONSTANT(CanGenerate)); + static_assert(std::tuple_size::value <= NumOperations); + if constexpr (std::tuple_size::value > 0) { + if constexpr (std::tuple_size::value != NumOperations) { + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + } else { + /* No progress was made, so we need to make a change. */ + constexpr auto ModifiedOperations = GetModifiedOperations(WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), ModifiedOperations); + } + } else { + return AfterAllocator; + } + } + + static constexpr MetaCode GenerateOriginalBeforeMetaCode() { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Reserve registers used by the input layout. */ + constexpr auto InitialAllocator = [] { + RegisterAllocator initial_allocator; + for (size_t i = 0; i < SvcAbiType::RegisterCount; i++) { + if (Conversion::LayoutForSvc.GetInputLayout().UsesRegister(i)) { + initial_allocator.Allocate(i); + } + } + return initial_allocator; + }(); + + /* Save every register that needs to be preserved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template SaveRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + /* Allocate space on the stack for parameters that need it. */ + if constexpr (UsedStackSpace > 0) { + mcg.template AllocateStackSpace(); + } + + /* Generate code for before operations. */ + if constexpr (Conversion::NumBeforeOperations > 0) { + allocator = GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(InitialAllocator), typename Conversion::BeforeOperations{}); + } else { + allocator = InitialAllocator; + } + + /* Generate code for after operations. */ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryPrepareForKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + /* We're not eligible for the straightforward optimization. */ + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + return mcg.GetMetaCode(); + } + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + using Conversion = LayoutConversion; + + static constexpr size_t UsedStackSpace = Conversion::NonAbiUsedStackIndices * KernelAbiType::RegisterSize; + + static constexpr MetaCode OriginalBeforeMetaCode = [] { + return GenerateOriginalBeforeMetaCode(); + }(); + + static constexpr MetaCode OriginalAfterMetaCode = [] { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Generate code for after operations. 
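TypeIndexFilter, used above, filters a tuple of operation types by a constexpr bool array of "already generated" flags. The recursion collapses considerably if class-type non-type template parameters are available; a sketch assuming C++20 (the diff instead threads the array through WRAP_TEMPLATE_CONSTANT to stay on C++17):

```cpp
#include <array>
#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>

/* Carry a constexpr array as a template parameter (C++20 structural NTTP). */
template<auto Array>
struct Holder {};

template<auto Array, size_t... Is, typename... Ts>
constexpr auto FilterImpl(Holder<Array>, std::tuple<Ts...>, std::index_sequence<Is...>) {
    /* Keep the types whose flag is still false, i.e. not yet handled. */
    return std::tuple_cat(
        std::conditional_t<!Array[Is], std::tuple<Ts>, std::tuple<>>{}...
    );
}

template<auto Array, typename... Ts>
constexpr auto Filter(Holder<Array> h, std::tuple<Ts...> t) {
    return FilterImpl(h, t, std::make_index_sequence<sizeof...(Ts)>());
}

constexpr std::array<bool, 3> Handled = { true, false, true };
using Remaining = decltype(Filter(Holder<Handled>{}, std::tuple<int, long, char>{}));
static_assert(std::is_same<Remaining, std::tuple<long>>::value);

int main() { return 0; }
```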
*/ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::KernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + /* Allocate space on the stack for parameters that need it. */ + if constexpr (UsedStackSpace > 0) { + mcg.template FreeStackSpace(); + } + + if constexpr (Conversion::NumClearRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template ClearRegisters(); + }(typename Conversion::ClearRegisters{}); + } + + /* Restore registers we previously saved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template RestoreRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + return mcg.GetMetaCode(); + }(); + + /* TODO: Implement meta code optimization via separate layer. */ + /* Right now some basic optimizations are just implemented by the above generators. */ + static constexpr MetaCode OptimizedBeforeMetaCode = OriginalBeforeMetaCode; + static constexpr MetaCode OptimizedAfterMetaCode = OriginalAfterMetaCode; + }; + + template + class KernelSvcWrapperHelper { + private: + using Traits = FunctionTraits; + public: + using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>; + + static constexpr bool IsAarch64Kernel = std::is_same<_KernelAbiType, Aarch64Lp64Abi>::value; + static constexpr bool IsAarch32Kernel = std::is_same<_KernelAbiType, Aarch32Ilp32Abi>::value; + static_assert(IsAarch64Kernel || IsAarch32Kernel); + + using CodeGenerator = typename std::conditional::type; + + static constexpr auto BeforeMetaCode = Impl::OptimizedBeforeMetaCode; + static constexpr auto AfterMetaCode = Impl::OptimizedAfterMetaCode; + + +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void WrapSvcFunction() { + /* Generate appropriate assembly. */ + GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(BeforeMetaCode)); + ON_SCOPE_EXIT { GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(AfterMetaCode)); }; + + return reinterpret_cast(Function)(); + } + +#pragma GCC pop_options + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp new file mode 100644 index 000000000..132b13ae1 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
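WrapSvcFunction above relies on ON_SCOPE_EXIT so that the "after" meta code is emitted when the wrapper's scope unwinds, after the kernel procedure call. A bare-bones stand-in for that guard (names invented; Atmosphère ships its own scope-guard utilities):

```cpp
#include <cstdio>
#include <utility>

/* Run a lambda from the guard's destructor, so epilogue work happens when
 * the enclosing scope exits, by any path. */
template<typename F>
class ScopeGuard {
    private:
        F f;
    public:
        explicit ScopeGuard(F &&fn) : f(std::forward<F>(fn)) {}
        ~ScopeGuard() { f(); }
};

void WrapCall() {
    std::printf("prologue: BeforeMetaCode\n");
    ScopeGuard guard([] { std::printf("epilogue: AfterMetaCode\n"); });
    std::printf("call: kernel procedure\n");
} /* The epilogue runs here, after the call. */

int main() { WrapCall(); return 0; }
```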
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" + +namespace ams::svc::codegen::impl { + + class ParameterLayout { + public: + static constexpr size_t MaxParameters = 8; + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + /* ABI parameters. */ + Abi abi; + + /* Parameter storage. */ + size_t num_parameters; + Parameter parameters[MaxParameters]; + public: + constexpr explicit ParameterLayout(Abi a) + : abi(a), num_parameters(0), parameters() + { /* ... */ } + + constexpr void AddSingle(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t idx) { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + this->parameters[i].AddLocation(Location(s, idx)); + return; + } + } + this->parameters[this->num_parameters++] = Parameter(id, type, ts, ps, p, Location(s, idx)); + } + + constexpr size_t Add(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t i) { + size_t required_registers = 0; + + while (required_registers * this->abi.register_size < ps) { + this->AddSingle(id, type, ts, ps, p, s, i++); + required_registers++; + } + + return required_registers; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].UsesLocation(l)) { + return true; + } + } + return false; + } + + constexpr bool UsesRegister(size_t i) const { + return this->UsesLocation(Location(Storage::Register, i)); + } + + constexpr bool IsRegisterFree(size_t i) const { + return !(this->UsesRegister(i)); + } + + constexpr size_t GetNumParameters() const { + return this->num_parameters; + } + + constexpr Parameter GetParameter(size_t i) const { + return this->parameters[i]; + } + + constexpr bool HasParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return true; + } + } + return false; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return this->parameters[i]; + } + } + std::abort(); + } + }; + + class ProcedureLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ProcessArgument(size_t i, size_t &NGRN, size_t &NSAA) { + /* We currently don't implement support for floating point types. */ + static_assert(!std::is_floating_point::value); + static_assert(!std::is_same::value); + + constexpr size_t ArgumentTypeSize = AbiType::template Size; + constexpr bool PassedByPointer = IsPassedByPointer; + constexpr size_t ArgumentPassSize = PassedByPointer ? AbiType::PointerSize : ArgumentTypeSize; + + /* TODO: Is there ever a case where this is not the correct alignment? */ + constexpr size_t ArgumentAlignment = ArgumentPassSize; + + /* Ensure NGRN is aligned. */ + if constexpr (ArgumentAlignment > AbiType::RegisterSize) { + NGRN += (NGRN & 1); + } + + /* TODO: We don't support splitting arguments between registers and stack, but AAPCS32 does. */ + /* Is this a problem? Nintendo seems to not ever do this. */ + + auto id = Parameter::Identifier("FunctionParameter", i); + + /* Allocate integral types specially per aapcs. 
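ParameterLayout::Add above assigns one location per register-size chunk until the parameter's passed size is covered. The loop in isolation, showing how the same 8-byte argument costs two locations under a 4-byte register size but only one under an 8-byte one:

```cpp
#include <cstddef>
#include <cstdio>

/* ParameterLayout::Add in miniature: a parameter wider than one register
 * occupies consecutive locations, one per register_size chunk. */
size_t AddLocations(size_t pass_size, size_t register_size, size_t first_index) {
    size_t required_registers = 0;
    while (required_registers * register_size < pass_size) {
        std::printf("location: register %zu\n", first_index + required_registers);
        required_registers++;
    }
    return required_registers;
}

int main() {
    /* A 64-bit argument under an ILP32 SVC ABI spans two registers;
     * under LP64 it needs only one. */
    std::printf("ilp32: used %zu registers\n", AddLocations(8, 4, 0));
    std::printf("lp64:  used %zu registers\n", AddLocations(8, 8, 0));
    return 0;
}
```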
*/ + constexpr ArgumentType Type = GetArgumentType; + const size_t registers_available = AbiType::RegisterCount - NGRN; + if constexpr (!PassedByPointer && IsIntegralOrUserPointer && ArgumentTypeSize > AbiType::RegisterSize) { + if (registers_available >= 2) { + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + NGRN += 2; + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + NSAA += (NSAA & 1); + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + NSAA += 2; + } + } else { + if (ArgumentPassSize <= AbiType::RegisterSize * registers_available) { + NGRN += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + /* TODO: Stack pointer alignment is only ensured for aapcs64. */ + /* What should we do here? */ + + NSAA += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + } + } + } + public: + constexpr explicit ProcedureLayout(Abi a) : abi(a), input(a), output(a) { /* ... */ } + + template + static constexpr ProcedureLayout Create() { + ProcedureLayout layout(Abi::Convert()); + + /* 1. The Next General-purpose Register Number (NGRN) is set to zero. */ + [[maybe_unused]] size_t NGRN = 0; + + /* 2. The next stacked argument address (NSAA) is set to the current stack-pointer value (SP). */ + [[maybe_unused]] size_t NSAA = 0; /* Should be considered an offset from stack pointer. */ + + /* 3. Handle the return type. */ + /* TODO: It's unclear how to handle the non-integral and too-large case. */ + if constexpr (!std::is_same::value) { + constexpr size_t ReturnTypeSize = AbiType::template Size; + layout.output.Add(Parameter::Identifier("ReturnType"), ArgumentType::Invalid, ReturnTypeSize, ReturnTypeSize, false, Storage::Register, 0); + static_assert(IsIntegral || ReturnTypeSize <= AbiType::RegisterSize); + } + + /* Process all arguments, in order. */ + size_t i = 0; + (layout.ProcessArgument(i++, NGRN, NSAA), ...); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + if (this->input.HasParameter(id)) { + return this->input.GetParameter(id); + } else { + return this->output.GetParameter(id); + } + } + }; + + class SvcInvocationLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ForEachInputArgument(ParameterLayout param_layout, F f) { + /* We want to iterate over the parameters in sorted order. 
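The NGRN/NSAA bookkeeping above follows the AAPCS allocation algorithm, including the rule that a 64-bit integral on a 32-bit ABI must start at an even register number. A small trace of that rule:

```cpp
#include <cstddef>
#include <cstdio>

/* Simplified AAPCS32-style allocation: a 64-bit integral must start in an
 * even register pair (r0/r1 or r2/r3), so NGRN is rounded up first. */
int main() {
    const size_t register_size  = 4;
    const size_t register_count = 4;

    size_t NGRN = 0; /* Next General-purpose Register Number. */

    /* int32 argument -> r0. */
    std::printf("arg0 (4 bytes): r%zu\n", NGRN);
    NGRN += 1;

    /* int64 argument: align NGRN to even, then take a pair -> r2/r3. */
    const size_t alignment = 8;
    if (alignment > register_size) {
        NGRN += (NGRN & 1);
    }
    if (register_count - NGRN >= 2) {
        std::printf("arg1 (8 bytes): r%zu/r%zu (r1 is skipped)\n", NGRN, NGRN + 1);
        NGRN += 2;
    }
    return 0;
}
```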
*/ + std::array map = {}; + const size_t num_parameters = param_layout.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && param_layout.GetParameter(map[j-1]).GetLocation(0) > param_layout.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::In && !Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::InUserPointer) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::OutUserPointer) { + f(Parameter); + } + } + } + + template + constexpr void ForEachInputPointerArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::In && Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + } + + template + constexpr void ForEachOutputArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::Out) { + f(Parameter); + } + } + } + + template + static constexpr void AddRegisterParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Register) { + reg_allocator.Allocate(location.GetIndex()); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, location.GetIndex()); + } + } + } + + template + static constexpr void AddStackParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Stack) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, free_reg); + } + } + } + + template + static constexpr void AddIndirectParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + const size_t type_size = param.GetTypeSize(); + for (size_t sz = 0; sz < type_size; sz += AbiType::RegisterSize) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), type_size, type_size, false, Storage::Register, free_reg); + } + } + public: + constexpr explicit SvcInvocationLayout(Abi a) : abi(a), input(a), output(a) { /* ... 
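The index map above is sorted with a hand-rolled insertion sort because, as the comment notes, std::swap only became constexpr in C++20. The same pattern standalone, verified at compile time:

```cpp
#include <array>
#include <cstddef>

/* Manual element swap keeps this valid in a C++17 constant expression. */
constexpr std::array<size_t, 5> SortedCopy(std::array<size_t, 5> arr) {
    for (size_t i = 1; i < arr.size(); i++) {
        for (size_t j = i; j > 0 && arr[j - 1] > arr[j]; j--) {
            const size_t tmp = arr[j];
            arr[j] = arr[j - 1];
            arr[j - 1] = tmp;
        }
    }
    return arr;
}

constexpr auto Sorted = SortedCopy({ 3, 1, 4, 0, 2 });
static_assert(Sorted[0] == 0 && Sorted[4] == 4);

int main() { return 0; }
```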
*/ } + + template + static constexpr SvcInvocationLayout Create(ProcedureLayout procedure_layout) { + SvcInvocationLayout layout(Abi::Convert()); + RegisterAllocator input_register_allocator, output_register_allocator; + + /* Input first wants to map in register -> register */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddRegisterParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in stack -> stack */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddStackParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in indirects -> register */ + layout.ForEachInputPointerArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.input, input_register_allocator, parameter); + }); + + /* Handle the return type. */ + if (procedure_layout.GetOutputLayout().GetNumParameters() > 0) { + if (procedure_layout.GetOutputLayout().GetNumParameters() != 1) { + std::abort(); + } + const auto return_param = procedure_layout.GetOutputLayout().GetParameter(0); + if (return_param.GetIdentifier() != Parameter::Identifier("ReturnType")) { + std::abort(); + } + AddRegisterParameter(layout.output, output_register_allocator, return_param); + } + + /* Handle other outputs. */ + layout.ForEachOutputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.output, output_register_allocator, parameter); + }); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp new file mode 100644 index 000000000..2e3d95775 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
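SvcInvocationLayout::Create above maps parameters in three passes: register locations stay in registers, kernel stack locations are pulled into free SVC registers, and by-pointer parameters get a register for their address. A sketch of the second pass' first-free allocation (the register numbering here is invented for illustration):

```cpp
#include <cstddef>
#include <cstdio>

/* AddStackParameter, reduced: anything the kernel procedure expects on the
 * stack is instead assigned the first free SVC register, since an SVC
 * receives all eight GP registers. */
int main() {
    bool allocated[8] = {};
    /* Pass 1 pinned x0..x2 as direct register-to-register mappings. */
    allocated[0] = allocated[1] = allocated[2] = true;

    /* Pass 2: two stack-resident kernel parameters each grab a free register. */
    for (int param = 0; param < 2; param++) {
        for (size_t reg = 0; reg < 8; reg++) {
            if (!allocated[reg]) {
                allocated[reg] = true;
                std::printf("stack parameter %d -> svc register x%zu\n", param, reg);
                break;
            }
        }
    }
    return 0;
}
```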
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" + +namespace ams::svc::codegen::impl { + + class LayoutConversionBase { + public: + enum class OperationKind { + Move, + LoadAndStore, + PackAndUnpack, + Scatter, + Invalid, + }; + + class OperationMoveImpl; + class OperationLoadAndStoreImpl; + class OperationPackAndUnpackImpl; + class OperationScatterImpl; + + class OperationBase{}; + + template + class Operation : public OperationBase { + public: + static constexpr OperationKind Kind = _Kind; + static constexpr size_t RegisterSize = RS; + static constexpr size_t PassedSize = PS; + static constexpr size_t StackOffset = SO; + static constexpr size_t ProcedureIndex = PIdx; + + static constexpr size_t NumSvcIndices = sizeof...(SIdx); + static constexpr std::array SvcIndices = { SIdx... }; + static constexpr std::index_sequence SvcIndexSequence = {}; + + template + static constexpr size_t SvcIndex = SvcIndices[I]; + + template + static void ForEachSvcIndex(F f) { + (f(SIdx), ...); + } + + using ImplType = typename std::conditional::type>::type>::type>::type; + + template + using ModifiedType = Operation; + }; + + template + using OperationMove = Operation; + + template + using OperationLoadAndStore = Operation; + + template + using OperationPackAndUnpack = Operation; + + template + using OperationScatter = Operation; + + class OperationMoveImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Move); + allocator.Free(Operation::template SvcIndex<0>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Move); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Allocate(Operation::ProcedureIndex); + mcg.template MoveRegister>(); + } + }; + + class OperationLoadAndStoreImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + return true; + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + constexpr size_t StackOffset = Operation::ProcedureIndex * Operation::RegisterSize; + mcg.template StoreToStack, StackOffset>(); + } + }; + + class OperationPackAndUnpackImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + 
allocator.Allocate(Operation::ProcedureIndex); + mcg.template Pack, Operation::template SvcIndex<1>>(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + /* ... */ + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + mcg.template Unpack, Operation::template SvcIndex<1>, Operation::ProcedureIndex>(); + } + }; + + class OperationScatterImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + allocator.Allocate(Operation::ProcedureIndex); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template LoadFromStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + } + }; + }; + + template + class LayoutConversion { + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + static constexpr auto LayoutForUser = ProcedureLayout::Create(); + static constexpr auto LayoutForSvc = SvcInvocationLayout::Create(LayoutForUser); + static constexpr auto LayoutForKernel = ProcedureLayout::Create(); + private: + template + static constexpr size_t DetermineUsedStackIndices() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. */ + return Used; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + /* We're not scattering, so stack won't be used. */ + return DetermineUsedStackIndices(); + } else { + /* We're scattering, and so we're using stack. 
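Each kernel-sized register holds KernelAbiType::RegisterSize / SvcAbiType::RegisterSize svc-sized locations, so we reserve the parameter's location count divided by that ratio, rounded up, in stack indices; e.g. two 32-bit svc locations fit in one 64-bit kernel stack slot.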
*/ + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + return DetermineUsedStackIndices(); + } + } + } + + static constexpr size_t AbiUsedStackIndices = [] { + constexpr auto KernLayout = LayoutForKernel.GetInputLayout(); + + size_t used = 0; + for (size_t i = 0; i < KernLayout.GetNumParameters(); i++) { + const auto Param = KernLayout.GetParameter(i); + for (size_t j = 0; j < Param.GetNumLocations(); j++) { + const auto Loc = Param.GetLocation(j); + if (Loc.GetStorage() == Storage::Stack) { + used = std::max(used, Loc.GetIndex() + 1); + } + } + } + + return used; + }(); + + static constexpr size_t BeforeUsedStackIndices = DetermineUsedStackIndices(); + static constexpr size_t AfterUsedStackIndices = DetermineUsedStackIndices(); + + template + static constexpr auto ZipMoveOperations() { + constexpr auto Procedure = LayoutForKernel; + constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + static_assert(ParameterIndex < Svc.GetNumParameters()); + + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + static_assert(SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()); + static_assert(SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()); + + if constexpr (LocationIndex >= SvcParam.GetNumLocations()) { + /* Base case: we're done. */ + return std::tuple<>{}; + } else { + constexpr Location SvcLoc = SvcParam.GetLocation(LocationIndex); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(LocationIndex); + + if constexpr (SvcLoc == ProcedureLoc) { + /* No need to emit an operation if we're not changing where we are. */ + return ZipMoveOperations(); + } else { + /* Svc location needs to be in a register. */ + static_assert(SvcLoc.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + using OperationType = LayoutConversionBase::OperationMove; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } else { + using OperationType = LayoutConversionBase::OperationLoadAndStore; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } + } + } + } + + template + static constexpr auto DetermineConversionOperations() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + [[maybe_unused]] constexpr std::array ParameterMap = [](SvcHolder){ + /* We want to iterate over the parameters in sorted order. 
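To do this at compile time, we build an identity index map and insertion-sort it by each parameter's first location (std::sort is not usable in a constexpr context here).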
*/ + constexpr ParameterLayout CapturedSvc = UNWRAP_TEMPLATE_CONSTANT(SvcHolder); + std::array map{}; + const size_t num_parameters = CapturedSvc.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && CapturedSvc.GetParameter(map[j-1]).GetLocation(0) > CapturedSvc.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + return map; + }(WRAP_TEMPLATE_CONSTANT(Svc)); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. */ + if constexpr (Input) { + static_assert(StackIndex == BeforeUsedStackIndices + AbiUsedStackIndices); + } else { + static_assert(StackIndex == AfterUsedStackIndices + BeforeUsedStackIndices + AbiUsedStackIndices); + } + return std::tuple<>{}; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterMap[ParameterIndex]); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + if constexpr (SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()) { + /* Normal moves and loads/stores. */ + return std::tuple_cat(ZipMoveOperations(), DetermineConversionOperations()); + } else { + /* We're packing. */ + /* Make sure we're handling the 2 -> 1 case. */ + static_assert(SvcParam.GetNumLocations() == 2); + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + constexpr Location SvcLoc0 = SvcParam.GetLocation(0); + constexpr Location SvcLoc1 = SvcParam.GetLocation(1); + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + static_assert(SvcLoc0.GetStorage() == Storage::Register); + static_assert(SvcLoc1.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + using OperationType = LayoutConversionBase::OperationPackAndUnpack; + + constexpr auto cur_op = std::make_tuple(OperationType{}); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } + } else { + /* One operation, since we're scattering. */ + static_assert(ProcedureParam.GetNumLocations() == 1); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + /* Scattering. In register during kernel call. */ + constexpr size_t RegisterSize = SvcAbiType::RegisterSize; + constexpr size_t PassedSize = ProcedureParam.GetTypeSize(); + + /* TODO: C++20 templated lambdas. For now, use GCC extension syntax. 
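The WRAP_TEMPLATE_CONSTANT/UNWRAP_TEMPLATE_CONSTANT pair smuggles each constexpr value into the lambda as a type, so the unwrapped constant remains usable in template arguments inside the lambda body.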
*/ + constexpr auto SvcIndexSequence = [](SvcParamWrapper, std::index_sequence) { + constexpr Parameter CapturedSvcParam = UNWRAP_TEMPLATE_CONSTANT(SvcParamWrapper); + return std::index_sequence{}; + }(WRAP_TEMPLATE_CONSTANT(SvcParam), std::make_index_sequence()); + + constexpr auto OperationValue = [](ProcedureLocWrapper, std::index_sequence) { + constexpr Location CapturedProcedureLoc = UNWRAP_TEMPLATE_CONSTANT(ProcedureLocWrapper); + return LayoutConversionBase::OperationScatter{}; + }(WRAP_TEMPLATE_CONSTANT(ProcedureLoc), SvcIndexSequence); + + constexpr auto cur_op = std::make_tuple(OperationValue); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } else { + /* TODO: How should on-stack-during-kernel-call be handled? */ + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + } + } + } + + static constexpr size_t PreserveRegisterStartIndex = SvcAbiType::ArgumentRegisterCount; + static constexpr size_t PreserveRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + static constexpr size_t ClearRegisterStartIndex = 0; + static constexpr size_t ClearRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + + template <size_t Index> + static constexpr bool ShouldPreserveRegister = (PreserveRegisterStartIndex <= Index && Index < PreserveRegisterEndIndex) && + LayoutForSvc.GetInputLayout().IsRegisterFree(Index) && LayoutForSvc.GetOutputLayout().IsRegisterFree(Index); + + template <size_t Index> + static constexpr bool ShouldClearRegister = (ClearRegisterStartIndex <= Index && Index < ClearRegisterEndIndex) && + LayoutForSvc.GetOutputLayout().IsRegisterFree(Index) && !ShouldPreserveRegister<Index>; + + template <size_t Index> + static constexpr auto DeterminePreserveRegisters() { + static_assert(PreserveRegisterStartIndex <= Index && Index <= PreserveRegisterEndIndex); + + if constexpr (Index >= PreserveRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldPreserveRegister<Index>) { + /* Preserve this register. */ + return IndexSequenceCat(std::index_sequence<Index>{}, DeterminePreserveRegisters<Index + 1>()); + } else { + /* We don't need to preserve this register, so we can skip onwards. */ + return IndexSequenceCat(std::index_sequence<>{}, DeterminePreserveRegisters<Index + 1>()); + } + } + } + + template <size_t Index> + static constexpr auto DetermineClearRegisters() { + static_assert(ClearRegisterStartIndex <= Index && Index <= ClearRegisterEndIndex); + + if constexpr (Index >= ClearRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldClearRegister<Index>) { + /* Clear this register. */ + return IndexSequenceCat(std::index_sequence<Index>{}, DetermineClearRegisters<Index + 1>()); + } else { + /* We don't need to clear this register, so we can skip onwards.
*/ + return IndexSequenceCat(std::index_sequence<>{}, DetermineClearRegisters()); + } + } + } + public: + static constexpr size_t NonAbiUsedStackIndices = AfterUsedStackIndices + BeforeUsedStackIndices; + using BeforeOperations = decltype(DetermineConversionOperations()); + using AfterOperations = decltype(DetermineConversionOperations()); + + static constexpr size_t NumBeforeOperations = std::tuple_size::value; + static constexpr size_t NumAfterOperations = std::tuple_size::value; + + using PreserveRegisters = decltype(DeterminePreserveRegisters()); + using ClearRegisters = decltype(DetermineClearRegisters()); + + static constexpr size_t NumPreserveRegisters = PreserveRegisters::size(); + static constexpr size_t NumClearRegisters = ClearRegisters::size(); + + static constexpr auto PreserveRegistersArray = ConvertToArray(PreserveRegisters{}); + static constexpr auto ClearRegistersArray = ConvertToArray(ClearRegisters{}); + public: + template + static constexpr bool CanGenerateCode(RegisterAllocator allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + return Operation::ImplType::template CanGenerateCodeForSvcInvocationToKernelProcedure(allocator); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + + template + static constexpr void GenerateCode(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + Operation::ImplType::template GenerateCodeForSvcInvocationToKernelProcedure(mcg, allocator); + } else if constexpr (CodeGenKind == CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForPrepareForKernelProcedureToSvcInvocation(mcg); + } else if constexpr (CodeGenKind == CodeGenerationKind::KernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForKernelProcedureToSvcInvocation(mcg); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp new file mode 100644 index 000000000..682c29237 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + class MetaCode { + public: + static constexpr size_t MaxOperations = 0x40; + + enum class OperationKind { + SaveRegisters, + RestoreRegisters, + ClearRegisters, + AllocateStackSpace, + FreeStackSpace, + MoveRegister, + LoadFromStack, + LoadPairFromStack, + StoreToStack, + StorePairToStack, + Pack, + Unpack, + LoadStackAddress, + }; + + static constexpr const char *GetOperationKindName(OperationKind k) { + #define META_CODE_OPERATION_KIND_ENUM_CASE(s) case OperationKind::s: return #s + switch (k) { + META_CODE_OPERATION_KIND_ENUM_CASE(SaveRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(RestoreRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(ClearRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(AllocateStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(FreeStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(MoveRegister); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadPairFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StoreToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StorePairToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(Pack); + META_CODE_OPERATION_KIND_ENUM_CASE(Unpack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadStackAddress); + default: + std::abort(); + } + #undef META_CODE_OPERATION_KIND_ENUM_CASE + } + + struct Operation { + OperationKind kind; + size_t num_parameters; + size_t parameters[16]; + }; + + template + static constexpr inline Operation MakeOperation() { + Operation op = {}; + static_assert(sizeof...(Is) <= sizeof(op.parameters) / sizeof(op.parameters[0])); + + op.kind = Kind; + op.num_parameters = sizeof...(Is); + + size_t i = 0; + ((op.parameters[i++] = Is), ...); + + return op; + } + private: + size_t num_operations; + std::array operations; + public: + constexpr explicit MetaCode() : num_operations(0), operations() { /* ... */ } + + constexpr size_t GetNumOperations() const { + return this->num_operations; + } + + constexpr Operation GetOperation(size_t i) const { + return this->operations[i]; + } + + constexpr void AddOperation(Operation op) { + this->operations[this->num_operations++] = op; + } + }; + + template + static constexpr auto GetOperationParameterSequence() { + constexpr auto _Operation = UNWRAP_TEMPLATE_CONSTANT(_OperationHolder); + constexpr size_t NumParameters = _Operation.num_parameters; + + return [](OperationHolder, std::index_sequence) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + return std::index_sequence{}; + }(_OperationHolder{}, std::make_index_sequence()); + } + + template + static ALWAYS_INLINE void GenerateCodeForOperationImpl(std::index_sequence) { + #define META_CODE_OPERATION_KIND_GENERATE_CODE(KIND) else if constexpr (Kind == MetaCode::OperationKind::KIND) { CodeGenerator::template KIND(); } + if constexpr (false) { /* ... 
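This is a dummy branch so that every operation kind below can be emitted uniformly as an else-if by the macro; an unknown kind falls through to the final static_assert.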
*/ } + META_CODE_OPERATION_KIND_GENERATE_CODE(SaveRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(RestoreRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(ClearRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(AllocateStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(FreeStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(MoveRegister) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadPairFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StoreToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StorePairToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Pack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Unpack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadStackAddress) + else { static_assert(Kind != Kind, "Unknown MetaOperationKind"); } + #undef META_CODE_OPERATION_KIND_GENERATE_CODE + } + + template + static ALWAYS_INLINE void GenerateCodeForOperation(OperationHolder) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + GenerateCodeForOperationImpl(GetOperationParameterSequence()); + } + + class MetaCodeGenerator { + private: + using OperationKind = typename MetaCode::OperationKind; + private: + MetaCode meta_code; + public: + constexpr explicit MetaCodeGenerator() : meta_code() { /* ... */ } + + constexpr MetaCode GetMetaCode() const { + return this->meta_code; + } + + constexpr void AddOperationDirectly(MetaCode::Operation op) { + this->meta_code.AddOperation(op); + } + + template + constexpr void SaveRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void RestoreRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void ClearRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void AllocateStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void FreeStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void MoveRegister() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadFromStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadPairFromStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StoreToStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StorePairToStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Pack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Unpack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadStackAddress() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + }; + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp new file 
mode 100644 index 000000000..c97bcb3f1 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + enum class Storage { + Register, + Stack, + Invalid, + }; + + class Location { + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + Storage storage; + size_t index; + public: + constexpr explicit Location() : storage(Storage::Invalid), index(InvalidIndex) { /* ... */ } + constexpr explicit Location(Storage s, size_t i) : storage(s), index(i) { /* ... */ } + + constexpr size_t GetIndex() const { return this->index; } + constexpr Storage GetStorage() const { return this->storage; } + + constexpr bool IsValid() const { + return this->index != InvalidIndex && this->storage != Storage::Invalid; + } + + constexpr bool operator==(const Location &rhs) const { + return this->index == rhs.index && this->storage == rhs.storage; + } + + constexpr bool operator<(const Location &rhs) const { + if (this->storage < rhs.storage) { + return true; + } else if (this->storage > rhs.storage) { + return false; + } else { + return this->index < rhs.index; + } + } + + constexpr bool operator>(const Location &rhs) const { + if (this->storage > rhs.storage) { + return true; + } else if (this->storage < rhs.storage) { + return false; + } else { + return this->index > rhs.index; + } + } + + constexpr bool operator!=(const Location &rhs) const { + return !(*this == rhs); + } + }; + + class Parameter { + public: + static constexpr size_t MaxLocations = 8; + static constexpr size_t IdentifierLengthMax = 0x40; + class Identifier { + private: + char name[IdentifierLengthMax]; + size_t index; + public: + constexpr explicit Identifier() : name(), index() { /* ... */ } + constexpr explicit Identifier(const char *nm, size_t idx = 0) : name(), index(idx) { + for (size_t i = 0; i < IdentifierLengthMax && nm[i]; i++) { + this->name[i] = nm[i]; + } + } + + constexpr bool operator==(const Identifier &rhs) const { + for (size_t i = 0; i < IdentifierLengthMax; i++) { + if (this->name[i] != rhs.name[i]) { + return false; + } + } + return this->index == rhs.index; + } + + constexpr bool operator!=(const Identifier &rhs) const { + return !(*this == rhs); + } + }; + private: + Identifier identifier; + ArgumentType type; + size_t type_size; + size_t passed_size; + bool passed_by_pointer; + size_t num_locations; + Location locations[MaxLocations]; + public: + constexpr explicit Parameter() + : identifier(), type(ArgumentType::Invalid), type_size(0), passed_size(0), passed_by_pointer(0), num_locations(0), locations() + { /* ... 
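Every member takes its invalid or zero default from the initializer list.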
*/ } + + constexpr explicit Parameter(Identifier id, ArgumentType t, size_t ts, size_t ps, bool p, Location l) + : identifier(id), type(t), type_size(ts), passed_size(ps), passed_by_pointer(p), num_locations(1), locations() + { + this->locations[0] = l; + } + + constexpr Identifier GetIdentifier() const { + return this->identifier; + } + + constexpr bool Is(Identifier rhs) const { + return this->identifier == rhs; + } + + constexpr ArgumentType GetArgumentType() const { + return this->type; + } + + constexpr size_t GetTypeSize() const { + return this->type_size; + } + + constexpr size_t GetPassedSize() const { + return this->passed_size; + } + + constexpr bool IsPassedByPointer() const { + return this->passed_by_pointer; + } + + constexpr size_t GetNumLocations() const { + return this->num_locations; + } + + constexpr Location GetLocation(size_t i) const { + return this->locations[i]; + } + + constexpr void AddLocation(Location l) { + this->locations[this->num_locations++] = l; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_locations; i++) { + if (this->locations[i] == l) { + return true; + } + } + return false; + } + + constexpr bool operator==(const Parameter &rhs) const { + if (!(this->identifier == rhs.identifier && + this->type == rhs.type && + this->type_size == rhs.type_size && + this->passed_size == rhs.passed_size && + this->passed_by_pointer == rhs.passed_by_pointer && + this->num_locations == rhs.num_locations)) + { + return false; + } + + for (size_t i = 0; i < this->num_locations; i++) { + if (!(this->locations[i] == rhs.locations[i])) { + return false; + } + } + + return true; + } + + constexpr bool operator!=(const Parameter &rhs) const { + return !(*this == rhs); + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..a992442d3 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_kernel_svc_wrapper.hpp" + +namespace ams::svc::codegen { + +#if defined(ATMOSPHERE_ARCH_ARM64) || defined(ATMOSPHERE_ARCH_ARM) + + template + class KernelSvcWrapper { + private: + /* TODO: using Aarch32 = */ + using Aarch64 = impl::KernelSvcWrapperHelper; + using Aarch64From32 = impl::KernelSvcWrapperHelper; + public: +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. 
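Presumably the table entries run with the interrupted context's registers still live, so x29 must survive these wrappers untouched and no frame may be set up.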
*/ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void Call64() { + Aarch64::WrapSvcFunction(); + } + + static ALWAYS_INLINE void Call64From32() { + Aarch64From32::WrapSvcFunction(); + } + +#pragma GCC pop_options + }; + +#else + + #error "Unknown architecture for Kernel SVC Code Generation" + +#endif + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/svc_codegen.hpp b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp new file mode 100644 index 000000000..59e7c1b1a --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +/* NOTE: This header must not be included by svc.hpp. */ +#include "svc_common.hpp" +#include "svc_types.hpp" +#include "svc_definitions.hpp" + +#include "codegen/svc_codegen_kernel_svc_wrapper.hpp" diff --git a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp index ea0062e83..fa305f536 100644 --- a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp @@ -20,13 +20,13 @@ #define AMS_SVC_KERN_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_KERN_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME -#define AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME +#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME +#define AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME #define AMS_SVC_USER_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_USER_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) const TYPE *NAME -#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) TYPE *NAME +#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME +#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME #define AMS_SVC_FOREACH_DEFINITION_IMPL(HANDLER, NAMESPACE, INPUT, OUTPUT, INPTR, OUTPTR) \ HANDLER(0x01, Result, SetHeapSize, OUTPUT(::ams::svc::Address, out_address), INPUT(::ams::svc::Size, size)) \ @@ -181,5 +181,49 @@ namespace ams::svc { } +/* NOTE: Change this to 1 to test the SVC definitions for user-pointer validity. */ +#if 0 +namespace ams::svc::test { + + namespace impl { + + template + struct Validator { + private: + std::array valid; + public: + constexpr Validator(Ts... args) : valid{static_cast(args)...} { /* ... 
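Each constructor argument is the result of one per-parameter check; IsValid() below simply folds them together, so the whole test evaluates inside the static_assert at compile time.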
*/ } + + constexpr bool IsValid() const { + for (size_t i = 0; i < sizeof...(Ts); i++) { + if (!this->valid[i]) { + return false; + } + } + return true; + } + }; + + } + + + #define AMS_SVC_TEST_EMPTY_HANDLER(TYPE, NAME) true + #define AMS_SVC_TEST_INPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + #define AMS_SVC_TEST_OUTPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + + #define AMS_SVC_TEST_VERIFY_USER_POINTERS(ID, RETURN_TYPE, NAME, ...) \ + static_assert(impl::Validator(__VA_ARGS__).IsValid(), "Invalid User Pointer in svc::" #NAME); + + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, lp64, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, ilp32, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + + #undef AMS_SVC_TEST_VERIFY_USER_POINTERS + #undef AMS_SVC_TEST_INPTR_HANDLER + #undef AMS_SVC_TEST_OUTPTR_HANDLER + #undef AMS_SVC_TEST_EMPTY_HANDLER + +} #endif +#endif /* ATMOSPHERE_IS_STRATOSPHERE */ + diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 72f5b13df..34dfc3b3d 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -17,6 +17,12 @@ #pragma once #include "svc_common.hpp" +namespace ams::kern::svc::impl { + + struct KUserPointerTag{}; + +} + namespace ams::svc { /* Utility classes required to encode information into the type system for SVC veneers. */ @@ -40,6 +46,24 @@ namespace ams::svc { static_assert(sizeof(Address) == sizeof(uintptr_t)); static_assert(std::is_trivially_destructible
<Address>::value); + namespace impl { + + struct UserPointerTag{}; + + } + + template <typename T> + struct UserPointer : impl::UserPointerTag { + public: + static_assert(std::is_pointer<T>::value); + static constexpr bool IsInput = std::is_const<typename std::remove_pointer<T>::type>::value; + private: + T pointer; + }; + + template <typename T> + static constexpr inline bool IsUserPointer = std::is_base_of<impl::UserPointerTag, T>::value; + + using PhysicalAddress = u64; + + /* Memory types.