Use scoped_lock, etc

This commit is contained in:
TuxSH 2018-11-05 14:12:38 +01:00 committed by Michael Scire
parent 698fa9fcb0
commit cd1f74154d
7 changed files with 21 additions and 21 deletions

View file

@@ -51,13 +51,13 @@ class KObjectAllocator {
void RegisterObject(T &obj) noexcept
{
std::lock_guard guard{mutex};
std::scoped_lock guard{mutex};
allocatedSet.insert(obj);
}
void UnregisterObject(T &obj) noexcept
{
std::lock_guard guard{mutex};
std::scoped_lock guard{mutex};
allocatedSet.erase(obj);
}

View file

@@ -8,7 +8,7 @@ namespace mesosphere
void KAlarm::AddAlarmable(IAlarmable &alarmable)
{
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
alarmables.insert(alarmable);
KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
@@ -16,7 +16,7 @@ void KAlarm::AddAlarmable(IAlarmable &alarmable)
void KAlarm::RemoveAlarmable(const IAlarmable &alarmable)
{
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
alarmables.erase(alarmable);
KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
@@ -26,8 +26,8 @@ void KAlarm::HandleAlarm()
{
{
KCriticalSection &critsec = KScheduler::GetCriticalSection();
std::lock_guard criticalSection{critsec};
std::lock_guard guard{spinlock};
std::scoped_lock criticalSection{critsec};
std::scoped_lock guard{spinlock};
KSystemClock::SetInterruptMasked(true); // mask timer interrupt
KSystemClock::time_point currentTime = KSystemClock::now(), maxAlarmTime;

View file

@@ -8,27 +8,27 @@ KResourceLimit KResourceLimit::defaultInstance{};
size_t KResourceLimit::GetCurrentValue(KResourceLimit::Category category) const
{
// Caller should check category
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
return currentValues[(uint)category];
}
size_t KResourceLimit::GetLimitValue(KResourceLimit::Category category) const
{
// Caller should check category
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
return limitValues[(uint)category];
}
size_t KResourceLimit::GetRemainingValue(KResourceLimit::Category category) const
{
// Caller should check category
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
return limitValues[(uint)category] - currentValues[(uint)category];
}
bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t value)
{
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
if ((long)value < 0 || currentValues[(uint)category] > value) {
return false;
} else {
@@ -40,7 +40,7 @@ bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t val
void KResourceLimit::Release(KResourceLimit::Category category, size_t count, size_t realCount)
{
// Caller should ensure parameters are correct
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
currentValues[(uint)category] -= count;
realValues[(uint)category] -= realCount;
condvar.notify_all();
@@ -48,7 +48,7 @@ void KResourceLimit::Release(KResourceLimit::Category category, size_t count, si
bool KResourceLimit::ReserveDetail(KResourceLimit::Category category, size_t count, const KSystemClock::time_point &timeoutTime)
{
std::lock_guard guard{condvar.mutex()};
std::scoped_lock guard{condvar.mutex()};
if ((long)count <= 0 || realValues[(uint)category] >= limitValues[(uint)category]) {
return false;
}

View file

@@ -20,7 +20,7 @@ SharedPtr<KAutoObject> KHandleTable::GetAutoObject(Handle handle) const
// Note: official kernel locks the spinlock here, but we don't need to.
return nullptr;
} else {
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
return IsValid(handle) ? entries[handle.index].object : nullptr;
}
}
@@ -50,7 +50,7 @@ bool KHandleTable::Close(Handle handle)
if (handle.IsAliasOrFree()) {
return false;
} else {
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
if (IsValid(handle)) {
entries[-firstFreeIndex].id = firstFreeIndex;
firstFreeIndex = -(s16)handle.index;
@@ -67,7 +67,7 @@ bool KHandleTable::Generate(Handle &out, SharedPtr<KAutoObject> obj)
{
// Note: nullptr is accepted, for deferred-init.
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
if (numActive >= capacity) {
return false; // caller should return 0xD201
}
@@ -93,7 +93,7 @@ bool KHandleTable::Generate(Handle &out, SharedPtr<KAutoObject> obj)
bool KHandleTable::Set(SharedPtr<KAutoObject> obj, Handle handle)
{
if (!handle.IsAliasOrFree() && IsValid(handle)) {
std::lock_guard guard{spinlock};
std::scoped_lock guard{spinlock};
entries[handle.index].object = std::move(obj);
return true;
} else {

View file

@@ -10,7 +10,7 @@ void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeout
// Official kernel counts number of waiters, but that isn't necessary
{
KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
std::lock_guard guard{KScheduler::GetCriticalSection()};
KScopedCriticalSection criticalSection{};
mutex_.unlock();
if (currentThread->WaitForKernelSync(waiterList)) {
(void)timeoutPoint; //TODO!
@@ -23,7 +23,7 @@ void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeout
void KConditionVariable::notify_one() noexcept
{
std::lock_guard guard{KScheduler::GetCriticalSection()};
KScopedCriticalSection criticalSection{};
auto t = waiterList.begin();
if (t != waiterList.end()) {
t->ResumeFromKernelSync();
@@ -32,7 +32,7 @@ void KConditionVariable::notify_one() noexcept
void KConditionVariable::notify_all() noexcept
{
std::lock_guard guard{KScheduler::GetCriticalSection()};
KScopedCriticalSection criticalSection{};
KThread::ResumeAllFromKernelSync(waiterList);
}

View file

@@ -9,7 +9,7 @@ void KMutex::lock_slow_path(KThread &owner, KThread &requester)
{
// Requester is currentThread most of (all ?) the time
KCriticalSection &critsec = KScheduler::GetCriticalSection();
std::lock_guard criticalSection{critsec};
std::scoped_lock criticalSection{critsec};
if (KCoreContext::GetCurrentInstance().GetScheduler()->IsActive()) {
requester.SetWantedMutex((uiptr)this);
owner.AddMutexWaiter(requester);

View file

@@ -32,7 +32,7 @@ void KThread::AdjustScheduling(ushort oldMaskFull)
void KThread::Reschedule(KThread::SchedulingStatus newStatus)
{
//std::lock_guard criticalSection{KScheduler::GetCriticalSection()};
//KScopedCriticalSection criticalSection{};
// TODO check the above ^
AdjustScheduling(SetSchedulingStatusField(newStatus));
}