Removed unused code from the base library (everything left over from the old renderer).

This commit is contained in:
Alex Zolotarev 2015-12-10 14:34:17 +03:00
parent 80b4589570
commit e5b071bed9
18 changed files with 0 additions and 1817 deletions

View file

@@ -9,18 +9,13 @@ include($$ROOT_DIR/common.pri)
SOURCES += \
base.cpp \
commands_queue.cpp \
condition.cpp \
deferred_task.cpp \
exception.cpp \
fence_manager.cpp \
internal/message.cpp \
logging.cpp \
lower_case.cpp \
normalize_unicode.cpp \
object_tracker.cpp \
resource_pool.cpp \
runner.cpp \
shared_buffer_manager.cpp \
src_point.cpp \
string_format.cpp \
@@ -42,12 +37,9 @@ HEADERS += \
buffer_vector.hpp \
cache.hpp \
cancellable.hpp \
commands_queue.hpp \
condition.hpp \
const_helper.hpp \
deferred_task.hpp \
exception.hpp \
fence_manager.hpp \
internal/message.hpp \
limited_priority_queue.hpp \
logging.hpp \
@@ -55,14 +47,11 @@ HEADERS += \
math.hpp \
matrix.hpp \
mem_trie.hpp \
mru_cache.hpp \
mutex.hpp \
object_tracker.hpp \
observer_list.hpp \
regexp.hpp \
resource_pool.hpp \
rolling_hash.hpp \
runner.hpp \
scope_guard.hpp \
set_operations.hpp \
shared_buffer_manager.hpp \

View file

@@ -16,17 +16,13 @@ SOURCES += \
bits_test.cpp \
buffer_vector_test.cpp \
cache_test.cpp \
commands_queue_test.cpp \
condition_test.cpp \
const_helper.cpp \
containers_test.cpp \
deferred_task_test.cpp \
fence_manager_test.cpp \
logging_test.cpp \
math_test.cpp \
matrix_test.cpp \
mem_trie_test.cpp \
mru_cache_test.cpp \
observer_list_test.cpp \
regexp_test.cpp \
rolling_hash_test.cpp \

View file

@@ -1,119 +0,0 @@
#include "testing/testing.hpp"
#include "base/commands_queue.hpp"
#include "base/macros.hpp"
#include "base/thread.hpp"
#include "base/logging.hpp"
#include "std/atomic.hpp"
#include "std/bind.hpp"
void add_int(core::CommandsQueue::Environment const & env,
atomic<int> & i,
int a)
{
threads::Sleep(500);
if (env.IsCancelled())
return;
i += a;
LOG(LINFO, ("add_int result:", i));
}
void join_mul_int(core::CommandsQueue::Environment const & env,
shared_ptr<core::CommandsQueue::Command> const & command,
atomic<int> & i,
int b)
{
command->join();
int value = i;
while (!i.compare_exchange_weak(value, value * b));
LOG(LINFO, ("join_mul_int result: ", i));
}
void mul_int(core::CommandsQueue::Environment const & env, atomic<int> & i, int b)
{
if (env.IsCancelled())
return;
int value = i;
while (!i.compare_exchange_weak(value, value * b));
LOG(LINFO, ("mul_int result: ", i));
}
UNIT_TEST(CommandsQueue_SetupAndPerformSimpleTask)
{
core::CommandsQueue queue(1);
atomic<int> i(3);
queue.Start();
queue.AddCommand(bind(&add_int, _1, ref(i), 5));
queue.AddCommand(bind(&mul_int, _1, ref(i), 3));
queue.Join();
// threads::Sleep(1000);
queue.Cancel();
TEST(i == 24, ());
}
UNIT_TEST(CommandsQueue_SetupAndPerformSimpleTaskWith2Executors)
{
core::CommandsQueue queue(2);
atomic<int> i(3);
queue.Start();
queue.AddCommand(bind(&add_int, _1, ref(i), 5));
queue.AddCommand(bind(&mul_int, _1, ref(i), 3));
queue.Join();
// threads::Sleep(1000);
queue.Cancel();
TEST(i == 14, ());
}
UNIT_TEST(CommandsQueue_TestEnvironmentCancellation)
{
core::CommandsQueue queue(2);
atomic<int> i(3);
queue.Start();
queue.AddCommand(bind(&add_int, _1, ref(i), 5));
queue.AddCommand(bind(&mul_int, _1, ref(i), 3));
threads::Sleep(200); //< by now the second command has been executed; the first one will be cancelled
queue.Cancel();
TEST(i == 9, ());
}
UNIT_TEST(CommandsQueue_TestJoinCommand)
{
core::CommandsQueue queue0(1);
core::CommandsQueue queue1(1);
atomic<int> i(3);
queue0.Start();
queue1.Start();
shared_ptr<core::CommandsQueue::Command> cmd =
queue0.AddCommand(bind(&add_int, _1, ref(i), 5), true);
queue1.AddCommand(bind(&join_mul_int, _1, cmd, ref(i), 3), false);
queue0.Join();
queue1.Join();
queue0.Cancel();
queue1.Cancel();
TEST(i == 24, ("core::CommandsQueue::Command::join doesn't work"));
}

View file

@@ -1,81 +0,0 @@
#include "testing/testing.hpp"
#include "base/deferred_task.hpp"
#include "std/atomic.hpp"
#include "std/bind.hpp"
namespace
{
steady_clock::duration kSteadyClockResolution(1);
milliseconds const kTimeInaccuracy =
std::max(milliseconds(1), duration_cast<milliseconds>(kSteadyClockResolution));
void AddInt(atomic<int> & value, int a) { value += a; }
void MulInt(atomic<int> & value, int m)
{
int v = value;
while (!value.compare_exchange_weak(v, v * m))
;
}
} // namespace
// DeferredTask start (stop) is a memory barrier because it starts
// (stops) a thread. That's why it's ok to create time points before
// DeferredTask creation and after DeferredTask completion. Also,
// we're assuming that steady_clocks are consistent between CPU cores.
UNIT_TEST(DeferredTask_SimpleAdd)
{
steady_clock::time_point const start = steady_clock::now();
milliseconds const delay(1000);
atomic<int> value(0);
DeferredTask task1(bind(&AddInt, ref(value), 1), delay);
DeferredTask task2(bind(&AddInt, ref(value), 2), delay);
task1.WaitForCompletion();
task2.WaitForCompletion();
TEST_EQUAL(value, 3, ());
steady_clock::time_point const end = steady_clock::now();
milliseconds const elapsed = duration_cast<milliseconds>(end - start);
TEST(elapsed >= delay - kTimeInaccuracy, (elapsed.count(), delay.count()));
}
UNIT_TEST(DeferredTask_SimpleMul)
{
steady_clock::time_point const start = steady_clock::now();
milliseconds const delay(1500);
atomic<int> value(1);
DeferredTask task1(bind(&MulInt, ref(value), 2), delay);
DeferredTask task2(bind(&MulInt, ref(value), 3), delay);
task1.WaitForCompletion();
task2.WaitForCompletion();
TEST_EQUAL(value, 6, ());
steady_clock::time_point const end = steady_clock::now();
milliseconds const elapsed = duration_cast<milliseconds>(end - start);
TEST(elapsed >= delay - kTimeInaccuracy, (elapsed.count(), delay.count()));
}
UNIT_TEST(DeferredTask_CancelNoBlocking)
{
steady_clock::time_point const start = steady_clock::now();
milliseconds const delay(1500);
atomic<int> value(0);
DeferredTask task(bind(&AddInt, ref(value), 1), delay);
task.Cancel();
task.WaitForCompletion();
if (task.WasStarted())
{
TEST_EQUAL(value, 1, ());
steady_clock::time_point const end = steady_clock::now();
milliseconds const elapsed = duration_cast<milliseconds>(end - start);
TEST(elapsed >= delay - kTimeInaccuracy, (elapsed.count(), delay.count()));
}
}

View file

@@ -1,95 +0,0 @@
#include "testing/testing.hpp"
#include "base/commands_queue.hpp"
#include "base/fence_manager.hpp"
#include "std/bind.hpp"
void add_int(core::CommandsQueue::Environment const & env,
int fenceID,
FenceManager & fenceManager,
int & i,
int a,
int ms)
{
threads::Sleep(ms);
if (env.IsCancelled())
return;
i += a;
LOG(LINFO, ("add_int result:", i));
fenceManager.signalFence(fenceID);
}
void join_mul_int(core::CommandsQueue::Environment const & env,
int fenceID,
FenceManager & fenceManager,
int & i,
int b,
int ms)
{
threads::Sleep(ms);
fenceManager.joinFence(fenceID);
i *= b;
LOG(LINFO, ("join_mul_int result: ", i));
}
UNIT_TEST(FenceManager_SimpleFence)
{
FenceManager fenceManager(3);
core::CommandsQueue queue(2);
int i = 3;
queue.Start();
int fenceID = fenceManager.insertFence();
queue.AddCommand(bind(&add_int, _1, fenceID, ref(fenceManager), ref(i), 5, 500));
queue.AddCommand(bind(&join_mul_int, _1, fenceID, ref(fenceManager), ref(i), 3, 1));
queue.Join();
queue.Cancel();
TEST(i == 24, ());
}
UNIT_TEST(FenceManager_JoinAlreadySignalled)
{
FenceManager fenceManager(3);
core::CommandsQueue queue(2);
int i = 3;
queue.Start();
int fenceID = fenceManager.insertFence();
queue.AddCommand(bind(&add_int, _1, fenceID, ref(fenceManager), ref(i), 5, 1));
queue.AddCommand(bind(&join_mul_int, _1, fenceID, ref(fenceManager), ref(i), 3, 500));
queue.Join();
queue.Cancel();
TEST(i == 24, ());
}
UNIT_TEST(FenceManager_SignalAlreadySignalled)
{
FenceManager fenceManager(3);
core::CommandsQueue queue(2);
int i = 3;
queue.Start();
int fenceID = fenceManager.insertFence();
fenceManager.signalFence(fenceID);
queue.AddCommand(bind(&add_int, _1, fenceID, ref(fenceManager), ref(i), 5, 1));
queue.AddCommand(bind(&join_mul_int, _1, fenceID, ref(fenceManager), ref(i), 3, 500));
queue.Join();
queue.Cancel();
TEST(i == 24, ());
}

View file

@@ -1,48 +0,0 @@
#include "base/SRC_FIRST.hpp"
#include "testing/testing.hpp"
#include "base/mru_cache.hpp"
UNIT_TEST(MRUCache_Test)
{
my::MRUCache<int, int> m(4);
CHECK(m.HasElem(1) == false, ());
m.Add(1, 2, 1);
CHECK(m.HasElem(1) == true, ());
CHECK(m.Find(1, false) == 2, ());
m.Add(2, 4, 2);
CHECK(m.HasElem(2) == true, ());
CHECK(m.Find(2, false) == 4, ());
m.Touch(1);
m.Add(3, 9, 2);
CHECK(m.HasElem(3) == true, ());
CHECK(m.Find(3, false) == 9, ());
CHECK(m.HasElem(2) == false, ());
CHECK(m.HasElem(1) == true, ());
m.LockElem(1);
m.Add(4, 16, 2);
CHECK(m.HasElem(4) == true, ());
CHECK(m.Find(4, false) == 16, ());
CHECK(m.HasElem(3) == false, ());
CHECK(m.HasElem(1) == true, ());
m.UnlockElem(1);
// 1 is now unlocked and older than 4, so adding 5 should evict it
m.Add(5, 25, 2);
CHECK(m.HasElem(5) == true, ());
CHECK(m.HasElem(4) == true, ());
CHECK(m.HasElem(1) == false, ());
}

View file

@@ -1,236 +0,0 @@
#include "base/SRC_FIRST.hpp"
#include "base/logging.hpp"
#include "base/assert.hpp"
#include "std/bind.hpp"
#include "base/commands_queue.hpp"
namespace core
{
CommandsQueue::Environment::Environment(size_t threadNum)
: m_threadNum(threadNum)
{}
size_t CommandsQueue::Environment::threadNum() const
{
return m_threadNum;
}
CommandsQueue::BaseCommand::BaseCommand(bool isWaitable)
: m_waitCount(0), m_isCompleted(false)
{
if (isWaitable)
m_cond.reset(new threads::Condition());
}
void CommandsQueue::BaseCommand::join()
{
if (m_cond)
{
threads::ConditionGuard g(*m_cond.get());
m_waitCount++;
if (!m_isCompleted)
g.Wait();
}
else
LOG(LERROR, ("command isn't waitable"));
}
void CommandsQueue::BaseCommand::finish() const
{
if (m_cond)
{
threads::ConditionGuard g(*m_cond.get());
m_isCompleted = true;
CHECK(m_waitCount < 2, ("only one thread could wait for the queued command"));
if (m_waitCount)
g.Signal(true);
}
}
CommandsQueue::Chain::Chain()
{}
void CommandsQueue::Chain::operator()(CommandsQueue::Environment const & env)
{
for (list<function_t>::const_iterator it = m_fns.begin(); it != m_fns.end(); ++it)
(*it)(env);
}
CommandsQueue::Command::Command(bool isWaitable)
: BaseCommand(isWaitable)
{}
void CommandsQueue::Command::perform(Environment const & env) const
{
m_fn(env);
finish();
}
CommandsQueue::Routine::Routine(CommandsQueue * parent, size_t idx)
: m_parent(parent), m_env(idx)
{}
void CommandsQueue::Routine::Do()
{
// performing initialization tasks
for(list<shared_ptr<Command> >::const_iterator it = m_parent->m_initCommands.begin();
it != m_parent->m_initCommands.end();
++it)
(*it)->perform(m_env);
// main loop
while (!IsCancelled())
{
shared_ptr<CommandsQueue::Command> cmd = m_parent->m_commands.Front(true);
if (m_parent->m_commands.IsCancelled())
break;
m_env.Reset();
cmd->perform(m_env);
m_parent->FinishCommand();
}
// performing finalization tasks
for(list<shared_ptr<Command> >::const_iterator it = m_parent->m_finCommands.begin();
it != m_parent->m_finCommands.end();
++it)
(*it)->perform(m_env);
}
void CommandsQueue::Routine::Cancel()
{
m_env.Cancel();
// performing cancellation tasks
for(list<shared_ptr<Command> >::const_iterator it = m_parent->m_cancelCommands.begin();
it != m_parent->m_cancelCommands.end();
++it)
(*it)->perform(m_env);
IRoutine::Cancel();
}
void CommandsQueue::Routine::CancelCommand()
{
m_env.Cancel();
}
void CommandsQueue::Executor::Cancel()
{
if (m_thread.GetRoutine())
m_thread.Cancel();
}
void CommandsQueue::Executor::CancelCommand()
{
Routine * routine = m_thread.GetRoutineAs<Routine>();
CHECK(routine, ());
routine->CancelCommand();
}
CommandsQueue::CommandsQueue(size_t executorsCount)
: m_executors(executorsCount), m_activeCommands(0)
{
}
CommandsQueue::~CommandsQueue()
{
/// @todo memory leak in m_executors? call Cancel()?
//CHECK ( m_executors == 0, () );
}
void CommandsQueue::Cancel()
{
m_commands.Cancel();
for (auto & executor : m_executors)
executor.Cancel();
m_executors.clear();
}
void CommandsQueue::CancelCommands()
{
for (auto & executor : m_executors)
executor.CancelCommand();
}
void CommandsQueue::Start()
{
for (size_t i = 0; i < m_executors.size(); ++i)
m_executors[i].m_thread.Create(make_unique<Routine>(this, i));
}
void CommandsQueue::AddCommand(shared_ptr<Command> const & cmd)
{
threads::ConditionGuard g(m_cond);
m_commands.PushBack(cmd);
++m_activeCommands;
}
void CommandsQueue::AddInitCommand(shared_ptr<Command> const & cmd)
{
m_initCommands.push_back(cmd);
}
void CommandsQueue::AddFinCommand(shared_ptr<Command> const & cmd)
{
m_finCommands.push_back(cmd);
}
void CommandsQueue::AddCancelCommand(shared_ptr<Command> const & cmd)
{
m_cancelCommands.push_back(cmd);
}
void CommandsQueue::FinishCommand()
{
threads::ConditionGuard g(m_cond);
--m_activeCommands;
if (m_activeCommands == 0)
g.Signal(true);
}
void CommandsQueue::Join()
{
threads::ConditionGuard g(m_cond);
while (m_activeCommands != 0)
g.Wait();
}
void CommandsQueue::ClearImpl(list<shared_ptr<CommandsQueue::Command> > & l)
{
threads::ConditionGuard g(m_cond);
size_t s = l.size();
l.clear();
m_activeCommands -= s;
if (m_activeCommands == 0)
g.Signal(true);
}
void CommandsQueue::Clear()
{
/// Let us assume that decreasing m_activeCommands is "operation A"
/// and clearing the list of commands is "operation B".
/// We should perform them atomically (both or none at the same time)
/// to prevent a situation where an Executor starts processing some command
/// between "operation A" and "operation B", which could lead to an underflow of m_activeCommands.
m_commands.ProcessList([this](list<shared_ptr<CommandsQueue::Command> > & l)
{
ClearImpl(l);
});
}
size_t CommandsQueue::ExecutorsCount() const
{
return m_executors.size();
}
}

View file

@@ -1,204 +0,0 @@
#pragma once
#include "std/function.hpp"
#include "std/shared_ptr.hpp"
#include "std/vector.hpp"
#include "base/cancellable.hpp"
#include "base/thread.hpp"
#include "base/threaded_list.hpp"
namespace core
{
/// Class that executes tasks, specified as functors, on the specified number of threads.
/// - all tasks are stored in a single ThreadedList
class CommandsQueue
{
private:
class Routine;
public:
struct Command;
/// Execution environment for a single command.
/// - passed into the task functor
/// - the task functor should check IsCancelled()
///   at reasonably small intervals and cancel
///   its work upon receiving "true".
class Environment : public my::Cancellable
{
private:
size_t m_threadNum;
protected:
explicit Environment(size_t threadNum);
friend class Routine;
public:
size_t threadNum() const; //< index of the thread that is executing the commands
};
/// single command
typedef function<void(Environment const &)> function_t;
/// basic command
/// - could wait for the completion of its execution
struct BaseCommand
{
shared_ptr<threads::Condition> m_cond;
mutable int m_waitCount;
mutable bool m_isCompleted;
/// isWaitable - should we create the threads::Condition?
/// This flag is used to save resources.
BaseCommand(bool isWaitable);
/// call this function when the execution
/// of the command is finished to release the waiters.
void finish() const;
/// @warning only a single thread may "join" a command
void join();
};
/// chain of commands
struct Chain
{
private:
list<function_t> m_fns;
public:
Chain();
Chain(function_t const & fn)
{
m_fns.push_back(fn);
}
Chain & addCommand(function_t const & fn)
{
m_fns.push_back(fn);
return *this;
}
void operator()(Environment const & env);
};
/// single command.
/// - commands can be chained together using the Chain class
struct Command : BaseCommand
{
private:
function_t m_fn;
public:
Command(bool isWaitable = false);
template <typename tt>
Command(tt t, bool isWaitable = false)
: BaseCommand(isWaitable), m_fn(t)
{}
void perform(Environment const & env) const;
};
private:
/// single execution routine
class Routine : public threads::IRoutine
{
private:
CommandsQueue * m_parent;
Environment m_env;
public:
Routine(CommandsQueue * parent, size_t idx);
/// threads::IRoutine overrides:
void Do() override;
void Cancel() override;
void CancelCommand();
};
/// Class that encapsulates a thread and its routine in a single object.
struct Executor
{
threads::Thread m_thread;
void Cancel();
void CancelCommand();
};
vector<Executor> m_executors;
ThreadedList<shared_ptr<Command> > m_commands;
list<shared_ptr<Command> > m_initCommands;
list<shared_ptr<Command> > m_finCommands;
list<shared_ptr<Command> > m_cancelCommands;
friend class Routine;
threads::Condition m_cond;
size_t m_activeCommands;
void FinishCommand();
CommandsQueue(CommandsQueue const &);
CommandsQueue const & operator=(CommandsQueue const &);
void ClearImpl(list<shared_ptr<Command> > & l);
public:
CommandsQueue(size_t executorsCount);
~CommandsQueue();
/// Number of executors in this queue
size_t ExecutorsCount() const;
/// Adding different types of commands
/// @{
void AddCommand(shared_ptr<Command> const & cmd);
void AddInitCommand(shared_ptr<Command> const & cmd);
void AddFinCommand(shared_ptr<Command> const & cmd);
void AddCancelCommand(shared_ptr<Command> const & cmd);
/// @}
void Start();
void Cancel();
void CancelCommands();
void Join();
void Clear();
template<typename command_tt>
shared_ptr<Command> AddCommand(command_tt cmd, bool isWaitable = false)
{
shared_ptr<Command> pcmd(new Command(cmd, isWaitable));
AddCommand(pcmd);
return pcmd;
}
template <typename command_tt>
shared_ptr<Command> AddInitCommand(command_tt cmd, bool isWaitable = false)
{
shared_ptr<Command> const pcmd(new Command(cmd, isWaitable));
AddInitCommand(pcmd);
return pcmd;
}
template <typename command_tt>
shared_ptr<Command> const AddFinCommand(command_tt cmd, bool isWaitable = false)
{
shared_ptr<Command> pcmd(new Command(cmd, isWaitable));
AddFinCommand(pcmd);
return pcmd;
}
template <typename command_tt>
shared_ptr<Command> const AddCancelCommand(command_tt cmd, bool isWaitable = false)
{
shared_ptr<Command> pcmd(new Command(cmd, isWaitable));
AddCancelCommand(pcmd);
return pcmd;
}
};
}
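
For reference, a minimal usage sketch of the removed core::CommandsQueue API (the lambda body is illustrative only; the pattern mirrors the unit tests deleted above):

#include "base/commands_queue.hpp"

void Example()
{
  core::CommandsQueue queue(2 /* number of executor threads */);
  queue.Start();

  // Commands receive the executor's Environment and are expected to poll IsCancelled().
  queue.AddCommand([](core::CommandsQueue::Environment const & env)
  {
    if (env.IsCancelled())
      return;
    // ... do the work on executor thread env.threadNum() ...
  });

  queue.Join();    // wait until all queued commands have finished
  queue.Cancel();  // stop the executor threads
}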

View file

@@ -1,77 +0,0 @@
#include "base/deferred_task.hpp"
#include "base/timer.hpp"
#include "base/logging.hpp"
#include "std/algorithm.hpp"
#include "std/mutex.hpp"
DeferredTask::Routine::Routine(TTask const & task, milliseconds delay, atomic<bool> & started)
: m_task(task), m_delay(delay), m_started(started)
{
}
void DeferredTask::Routine::Do()
{
mutex mu;
unique_lock<mutex> lock(mu);
steady_clock::time_point const end = steady_clock::now() + m_delay;
while (!IsCancelled())
{
steady_clock::time_point const current = steady_clock::now();
if (current >= end)
break;
m_cv.wait_for(lock, end - current, [this]()
{
return IsCancelled();
});
}
if (!IsCancelled())
{
m_started = true;
m_task();
}
}
void DeferredTask::Routine::Cancel()
{
threads::IRoutine::Cancel();
m_cv.notify_one();
}
DeferredTask::DeferredTask(TTask const & task, milliseconds ms) : m_started(false)
{
m_thread.Create(make_unique<Routine>(task, ms, m_started));
}
DeferredTask::~DeferredTask()
{
ASSERT_THREAD_CHECKER(m_threadChecker, ());
m_thread.Cancel();
}
bool DeferredTask::WasStarted() const
{
ASSERT_THREAD_CHECKER(m_threadChecker, ());
return m_started;
}
void DeferredTask::Cancel()
{
ASSERT_THREAD_CHECKER(m_threadChecker, ());
threads::IRoutine * routine = m_thread.GetRoutine();
CHECK(routine, ());
routine->Cancel();
}
void DeferredTask::WaitForCompletion()
{
ASSERT_THREAD_CHECKER(m_threadChecker, ());
m_thread.Join();
}

View file

@@ -1,61 +0,0 @@
#pragma once
#include "base/condition.hpp"
#include "base/macros.hpp"
#include "base/thread.hpp"
#include "base/thread_checker.hpp"
#include "std/chrono.hpp"
#include "std/condition_variable.hpp"
#include "std/function.hpp"
#include "std/unique_ptr.hpp"
// This class is used to call a function after waiting for a specified
// amount of time. The function is called in a separate thread. This
// class is not thread safe.
class DeferredTask
{
public:
typedef function<void()> TTask;
DeferredTask(TTask const & task, milliseconds ms);
~DeferredTask();
/// Returns true if the task was started after the delay.
bool WasStarted() const;
/// Cancels the task without waiting for the worker thread to terminate.
void Cancel();
/// Waits for the task's completion and the worker thread's termination.
void WaitForCompletion();
private:
class Routine : public threads::IRoutine
{
TTask const m_task;
milliseconds const m_delay;
condition_variable m_cv;
atomic<bool> & m_started;
public:
Routine(TTask const & task, milliseconds delay, atomic<bool> & started);
// IRoutine overrides:
void Do() override;
// my::Cancellable overrides:
void Cancel() override;
};
/// The construction and destruction order is strict here: m_started
/// is used by routine that will be executed on m_thread.
atomic<bool> m_started;
threads::Thread m_thread;
DECLARE_THREAD_CHECKER(m_threadChecker);
DISALLOW_COPY_AND_MOVE(DeferredTask);
};
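
For reference, a minimal usage sketch of the removed DeferredTask (the callback and the delay are illustrative):

#include "base/deferred_task.hpp"

void Example()
{
  // Run the callback on a worker thread after a 500 ms delay.
  DeferredTask task([] { /* delayed work */ }, milliseconds(500));

  // Either cancel before the delay expires (the callback may already have started) ...
  task.Cancel();
  // ... and/or wait for the worker thread to terminate.
  task.WaitForCompletion();

  // True only if the delay elapsed and the callback actually ran.
  bool const started = task.WasStarted();
  (void)started;
}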

View file

@@ -1,133 +0,0 @@
#include "base/fence_manager.hpp"
#include "base/assert.hpp"
#include "base/logging.hpp"
FenceManager::FenceManager(int conditionPoolSize)
: m_currentFence(0),
m_isCancelled(false)
{
for (unsigned i = 0; i < conditionPoolSize; ++i)
m_conditionPool.push_back(new threads::Condition());
}
FenceManager::~FenceManager()
{
for (map<int, threads::Condition*>::const_iterator it = m_activeFences.begin();
it != m_activeFences.end();
++it)
{
it->second->Signal();
delete it->second;
}
for (list<threads::Condition*>::iterator it = m_conditionPool.begin(); it != m_conditionPool.end(); ++it)
delete *it;
}
int FenceManager::insertFence()
{
threads::MutexGuard g(m_mutex);
if (m_isCancelled)
return -1;
if (m_conditionPool.empty())
return -1;
threads::Condition * cond = m_conditionPool.front();
m_conditionPool.pop_front();
int id = m_currentFence++;
m_activeFences[id] = cond;
return id;
}
void FenceManager::signalFence(int id)
{
threads::MutexGuard mutexGuard(m_mutex);
if (m_isCancelled)
return;
map<int, threads::Condition*>::iterator it = m_activeFences.find(id);
if (it == m_activeFences.end())
{
LOG(LDEBUG, ("fence with id", id, "has been already signalled or hasn't been installed yet"));
return;
}
threads::Condition * cond = it->second;
/// I assume that this guard will be destroyed after mutexGuard
threads::ConditionGuard fenceGuard(*cond);
/// erasing fence from active fences
m_activeFences.erase(it);
/// returning condition to the pool
m_conditionPool.push_back(cond);
/// signalling to all waiting fences
fenceGuard.Signal(true);
}
void FenceManager::joinFence(int id)
{
threads::Condition * cond = 0;
{
threads::MutexGuard g(m_mutex);
if (m_isCancelled)
return;
map<int, threads::Condition*>::iterator it = m_activeFences.find(id);
if (it == m_activeFences.end())
{
LOG(LDEBUG, ("fence with id", id, "has been already reached in the past or hasn't been installed yet"));
return;
}
cond = it->second;
/// we must lock the condition here to prevent it
/// from being signalled before it has been waited on
cond->Lock();
}
/// loop to protect against "spurious wakeups"
while (m_activeFences.find(id) != m_activeFences.end())
cond->Wait();
cond->Unlock();
}
void FenceManager::cancel()
{
threads::MutexGuard g(m_mutex);
if (m_isCancelled)
return;
m_isCancelled = true;
list<pair<int, threads::Condition*> > tempList;
for (map<int, threads::Condition*>::iterator it = m_activeFences.begin();
it != m_activeFences.end();
++it)
tempList.push_back(make_pair(it->first, it->second));
for (list<pair<int, threads::Condition*> >::const_iterator it = tempList.begin();
it != tempList.end();
++it)
{
threads::ConditionGuard fenceGuard(*it->second);
m_activeFences.erase(it->first);
fenceGuard.Signal(true);
}
}

View file

@@ -1,28 +0,0 @@
#pragma once
#include "base/condition.hpp"
#include "base/mutex.hpp"
#include "std/map.hpp"
#include "std/list.hpp"
class FenceManager
{
private:
threads::Mutex m_mutex;
list<threads::Condition *> m_conditionPool;
map<int, threads::Condition *> m_activeFences;
int m_currentFence;
bool m_isCancelled;
public:
FenceManager(int conditionPoolSize);
~FenceManager();
int insertFence();
void joinFence(int id);
void signalFence(int id);
void cancel();
};
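
For reference, a minimal single-threaded usage sketch of the removed FenceManager; in the deleted tests the signalling and joining sides run on different CommandsQueue executors:

#include "base/fence_manager.hpp"

void Example()
{
  FenceManager fences(4 /* condition pool size */);

  int const id = fences.insertFence();  // returns -1 if the pool is exhausted or the manager is cancelled

  // Producer side: publish the result, then release everyone waiting on the fence.
  fences.signalFence(id);

  // Consumer side: returns immediately here because the fence is already signalled;
  // otherwise it blocks until signalFence(id) is called from another thread.
  fences.joinFence(id);
}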

View file

@@ -1,262 +0,0 @@
#pragma once
#include "base/assert.hpp"
#include "base/logging.hpp"
#include "std/list.hpp"
#include "std/map.hpp"
namespace my
{
template <typename TValue>
struct MRUCacheValueTraits
{
static void Evict(TValue &){}
};
template <typename KeyT, typename ValueT, typename ValueTraitsT = MRUCacheValueTraits<ValueT> >
class MRUCache
{
public:
typedef MRUCache<KeyT, ValueT> this_type;
private:
MRUCache(this_type const & c);
this_type & operator= (this_type const &);
typedef list<KeyT> list_t;
struct MapEntry
{
ValueT m_value;
size_t m_weight;
size_t m_lockCount;
typename list_t::iterator m_it;
};
typedef map<KeyT, MapEntry> map_t;
typedef set<KeyT> key_set_t;
key_set_t m_keys;
map_t m_map;
list_t m_list;
int m_curWeight;
int m_maxWeight;
public:
MRUCache()
: m_curWeight(0), m_maxWeight(0)
{}
explicit MRUCache(int maxWeight)
: m_curWeight(0), m_maxWeight(maxWeight)
{}
set<KeyT> const & Keys() const
{
return m_keys;
}
bool HasRoom(int weight)
{
return m_curWeight + weight <= m_maxWeight;
}
/// total weight of the unlocked elements in the cache
int UnlockedWeight() const
{
int unlockedWeight = 0;
for (typename map_t::const_iterator it = m_map.begin(); it != m_map.end(); ++it)
{
if (it->second.m_lockCount == 0)
unlockedWeight += it->second.m_weight;
}
return unlockedWeight;
}
/// total weight of the locked elements in the cache
int LockedWeight() const
{
int lockedWeight = 0;
for (typename map_t::const_iterator it = m_map.begin(); it != m_map.end(); ++it)
{
if (it->second.m_lockCount != 0)
lockedWeight += it->second.m_weight;
}
return lockedWeight;
}
/// how much weight this cache can fit, counting unlocked
/// elements that could be evicted on request
int CanFit() const
{
return m_maxWeight - LockedWeight();
}
int MaxWeight() const
{
return m_maxWeight;
}
void Resize(int maxWeight)
{
m_maxWeight = maxWeight;
// when the cache is made smaller, this call
// evicts some unlocked elements
FreeRoom(0);
}
void FreeRoom(int weight)
{
if (HasRoom(weight))
return;
if (!m_list.empty())
{
typename list<KeyT>::iterator it = (++m_list.rbegin()).base();
while (m_curWeight + weight > m_maxWeight)
{
KeyT k = *it;
MapEntry & e = m_map[k];
/// erasing only unlocked elements
if (e.m_lockCount == 0)
{
m_curWeight -= e.m_weight;
ValueTraitsT::Evict(e.m_value);
m_map.erase(k);
m_keys.erase(k);
typename list<KeyT>::iterator nextIt = it;
if (nextIt != m_list.begin())
{
--nextIt;
m_list.erase(it);
it = nextIt;
}
else
{
m_list.erase(it);
break;
}
}
else
{
if (it == m_list.begin())
break;
--it;
}
}
}
ASSERT(m_curWeight + weight <= m_maxWeight, ());
}
bool HasElem(KeyT const & key)
{
return m_keys.find(key) != m_keys.end();
}
void LockElem(KeyT const & key)
{
ASSERT(HasElem(key), ());
m_map[key].m_lockCount += 1;
}
size_t LockCount(KeyT const & key)
{
ASSERT(HasElem(key), ());
return m_map[key].m_lockCount;
}
void UnlockElem(KeyT const & key)
{
ASSERT(HasElem(key), ());
ASSERT(m_map[key].m_lockCount > 0, (m_map[key].m_lockCount));
m_map[key].m_lockCount -= 1;
}
ValueT const & Find(KeyT const & key, bool DoTouch = true)
{
typename map_t::iterator it = m_map.find(key);
ASSERT(it != m_map.end(), ());
if (DoTouch)
Touch(key);
return it->second.m_value;
}
void Touch(KeyT const & key)
{
if (!HasElem(key))
return;
typename map_t::iterator it = m_map.find(key);
ASSERT(it != m_map.end(), ());
typename list_t::iterator listIt = it->second.m_it;
KeyT k = *listIt;
m_list.erase(listIt);
m_list.push_front(k);
it->second.m_it = m_list.begin();
}
void Remove(KeyT const & key)
{
typename map_t::iterator it = m_map.find(key);
ASSERT(it->second.m_lockCount == 0, ("removing locked element"));
if (it != m_map.end() && it->second.m_lockCount == 0)
{
m_curWeight -= it->second.m_weight;
m_list.erase(it->second.m_it);
ValueTraitsT::Evict(it->second.m_value);
m_map.erase(it);
m_keys.erase(key);
}
}
void Add(KeyT const & key, ValueT const & val, size_t weight)
{
if (HasElem(key))
Remove(key);
FreeRoom(weight);
m_list.push_front(key);
m_map[key].m_weight = weight;
m_map[key].m_value = val;
m_map[key].m_lockCount = 0;
m_map[key].m_it = m_list.begin();
m_keys.insert(key);
m_curWeight += weight;
}
void Clear()
{
for (typename map_t::iterator it = m_map.begin(); it != m_map.end(); ++it)
ValueTraitsT::Evict(it->second);
m_map.clear();
m_keys.clear();
m_list.clear();
m_curWeight = 0;
}
};
}
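
For reference, a minimal usage sketch of the removed my::MRUCache (keys, values and weights are illustrative):

#include "base/mru_cache.hpp"

void Example()
{
  my::MRUCache<int, int> cache(4 /* maximum total weight */);

  cache.Add(1 /* key */, 100 /* value */, 1 /* weight */);
  cache.Add(2, 200, 2);

  if (cache.HasElem(1))
  {
    int const value = cache.Find(1);  // Find() also touches (refreshes) the element by default
    (void)value;
  }

  cache.LockElem(2);     // protect the element from eviction while it is in use
  cache.Add(3, 300, 2);  // may evict unlocked elements to free enough weight
  cache.UnlockElem(2);
}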

View file

@@ -1,27 +0,0 @@
#include "base/SRC_FIRST.hpp"
#include "base/resource_pool.hpp"
BasePoolElemFactory::BasePoolElemFactory(char const * resName,
size_t elemSize,
size_t batchSize)
: m_resName(resName),
m_elemSize(elemSize),
m_batchSize(batchSize)
{}
char const * BasePoolElemFactory::ResName() const
{
return m_resName.c_str();
}
size_t BasePoolElemFactory::ElemSize() const
{
return m_elemSize;
}
size_t BasePoolElemFactory::BatchSize() const
{
return m_batchSize;
}

View file

@@ -1,363 +0,0 @@
#pragma once
#include "base/thread.hpp"
#include "base/threaded_list.hpp"
#include "base/logging.hpp"
#include "base/assert.hpp"
#include "std/bind.hpp"
#include "std/unique_ptr.hpp"
//#define ALLOCATED_COUNT
struct BasePoolElemFactory
{
string m_resName;
size_t m_elemSize;
size_t m_batchSize;
BasePoolElemFactory(char const * resName, size_t elemSize, size_t batchSize);
size_t BatchSize() const;
char const * ResName() const;
size_t ElemSize() const;
};
/// The basic traits class maintains a list of free resources.
template <typename TElem, typename TElemFactory>
struct BasePoolTraits
{
TElemFactory m_factory;
ThreadedList<TElem> m_pool;
bool m_IsDebugging;
threads::ThreadID m_MainThreadID;
typedef TElem elem_t;
BasePoolTraits(TElemFactory const & factory)
: m_factory(factory),
m_IsDebugging(false),
m_MainThreadID(threads::GetCurrentThreadID())
{
m_pool.SetName(factory.ResName());
}
virtual ~BasePoolTraits()
{}
virtual void Init()
{
Free(Reserve());
}
virtual void Free(TElem const & elem)
{
m_pool.PushBack(elem);
}
virtual TElem const Reserve()
{
return m_pool.Front(true);
}
virtual size_t Size() const
{
return m_pool.Size();
}
virtual void Cancel()
{
m_pool.Cancel();
}
virtual bool IsCancelled() const
{
return m_pool.IsCancelled();
}
virtual void UpdateState()
{
}
char const * ResName() const
{
return m_factory.ResName();
}
};
/// This traits class stores the free elements in a separate pool and has
/// a separate method to merge them all into the main pool.
/// It should be used, for example, for resources where a certain preparation operation
/// must be performed on the main thread before returning a resource
/// to the free pool (e.g. @see resource_manager.cpp StorageFactory).
template <typename TElemFactory, typename TBase>
struct SeparateFreePoolTraits : TBase
{
typedef TBase base_t;
typedef typename base_t::elem_t elem_t;
ThreadedList<elem_t> m_freePool;
int m_maxFreePoolSize;
SeparateFreePoolTraits(TElemFactory const & factory)
: base_t(factory), m_maxFreePoolSize(0)
{}
void Free(elem_t const & elem)
{
m_freePool.PushBack(elem);
/* if (base_t::m_IsDebugging)
{
int oldMaxFreePoolSize = m_maxFreePoolSize;
m_maxFreePoolSize = max(m_maxFreePoolSize, (int)m_freePool.Size());
if (oldMaxFreePoolSize != m_maxFreePoolSize)
LOG(LINFO, (base_t::m_pool.GetName(), "freePool maximum size has reached", m_maxFreePoolSize, "elements"));
}*/
}
void UpdateStateImpl(list<elem_t> & l)
{
for (typename list<elem_t>::const_iterator it = l.begin();
it != l.end();
++it)
{
base_t::m_factory.BeforeMerge(*it);
base_t::m_pool.PushBack(*it);
}
// if ((base_t::m_IsDebugging) && (!base_t::m_pool.GetName().empty()))
// LOG(LINFO, ("pool for", base_t::m_pool.GetName(), "has", base_t::m_pool.Size(), "elements"));
l.clear();
}
void UpdateState()
{
m_freePool.ProcessList([this] (list<elem_t> & l) { UpdateStateImpl(l); });
}
};
/// This traits class maintains a fixed-size set of pre-allocated resources.
template <typename TElemFactory, typename TBase >
struct FixedSizePoolTraits : TBase
{
typedef TBase base_t;
typedef typename base_t::elem_t elem_t;
size_t m_count;
bool m_isAllocated;
FixedSizePoolTraits(TElemFactory const & factory, size_t count)
: base_t(factory),
m_count(count),
m_isAllocated(false)
{}
elem_t const Reserve()
{
if (!m_isAllocated)
{
m_isAllocated = true;
LOG(LDEBUG, ("allocating ", base_t::m_factory.ElemSize() * m_count, "bytes for ", base_t::m_factory.ResName()));
for (size_t i = 0; i < m_count; ++i)
base_t::m_pool.PushBack(base_t::m_factory.Create());
}
return base_t::Reserve();
}
};
/// This traits class allocates resources on demand.
template <typename TElemFactory, typename TBase>
struct AllocateOnDemandMultiThreadedPoolTraits : TBase
{
typedef TBase base_t;
typedef typename base_t::elem_t elem_t;
typedef AllocateOnDemandMultiThreadedPoolTraits<TElemFactory, base_t> self_t;
#if defined(ALLOCATED_COUNT)
size_t m_poolSize;
#endif
AllocateOnDemandMultiThreadedPoolTraits(TElemFactory const & factory, size_t )
: base_t(factory)
#if defined(ALLOCATED_COUNT)
, m_poolSize(0)
#endif
{}
void AllocateIfNeeded(list<elem_t> & l)
{
if (l.empty())
{
#if defined(ALLOCATED_COUNT)
m_poolSize += base_t::m_factory.BatchSize();
#endif
for (unsigned i = 0; i < base_t::m_factory.BatchSize(); ++i)
l.push_back(base_t::m_factory.Create());
}
}
elem_t const Reserve()
{
elem_t res;
base_t::m_pool.ProcessList([this, &res] (list<elem_t> & l) { AllocateAndReserve(l, res); });
return res;
}
void AllocateAndReserve(list<elem_t> & l, elem_t & res)
{
AllocateIfNeeded(l);
ASSERT ( !l.empty(), () );
res = l.front();
l.pop_front();
}
};
template <typename TElemFactory, typename TBase>
struct AllocateOnDemandSingleThreadedPoolTraits : TBase
{
typedef TBase base_t;
typedef typename TBase::elem_t elem_t;
typedef AllocateOnDemandSingleThreadedPoolTraits<TElemFactory, TBase> self_t;
#if defined(ALLOCATED_COUNT)
size_t m_poolSize;
#endif
AllocateOnDemandSingleThreadedPoolTraits(TElemFactory const & factory, size_t )
: base_t(factory)
#if defined(ALLOCATED_COUNT)
, m_poolSize(0)
#endif
{}
void Init()
{}
void AllocateIfNeeded(list<elem_t> & l)
{
if (l.empty())
{
#if defined(ALLOCATED_COUNT)
m_poolSize += base_t::m_factory.BatchSize();
#endif
for (unsigned i = 0; i < base_t::m_factory.BatchSize(); ++i)
l.push_back(base_t::m_factory.Create());
}
}
elem_t const Reserve()
{
elem_t res;
/// allocate resources if needed if we're on the main thread.
if (threads::GetCurrentThreadID() == base_t::m_MainThreadID)
base_t::m_pool.ProcessList([this, &res] (list<elem_t> & l) { AllocateAndReserve(l, res); });
else
res = base_t::Reserve();
return res;
}
void AllocateAndReserve(list<elem_t> & l, elem_t & res)
{
AllocateIfNeeded(l);
ASSERT ( !l.empty(), () );
res = l.front();
l.pop_front();
}
void UpdateState()
{
base_t::UpdateState();
base_t::m_pool.ProcessList([this] (list<elem_t> & l) { AllocateIfNeeded(l); });
}
};
/// resource pool interface
template <typename TElem>
class ResourcePool
{
public:
virtual ~ResourcePool(){}
virtual TElem const Reserve() = 0;
virtual void Free(TElem const & elem) = 0;
virtual size_t Size() const = 0;
virtual void EnterForeground() = 0;
virtual void EnterBackground() = 0;
virtual void Cancel() = 0;
virtual bool IsCancelled() const = 0;
virtual void UpdateState() = 0;
virtual void SetIsDebugging(bool flag) = 0;
virtual char const * ResName() const = 0;
};
// This class tracks OpenGL resource allocation in
// a multithreaded environment.
template <typename TPoolTraits>
class ResourcePoolImpl : public ResourcePool<typename TPoolTraits::elem_t>
{
private:
unique_ptr<TPoolTraits> const m_traits;
public:
typedef typename TPoolTraits::elem_t elem_t;
ResourcePoolImpl(TPoolTraits * traits)
: m_traits(traits)
{
/// quick trick to perform lazy initialization
/// on the same thread the pool was created on.
m_traits->Init();
}
elem_t const Reserve()
{
return m_traits->Reserve();
}
void Free(elem_t const & elem)
{
m_traits->Free(elem);
}
size_t Size() const
{
return m_traits->Size();
}
void EnterForeground()
{}
void EnterBackground()
{}
void Cancel()
{
return m_traits->Cancel();
}
bool IsCancelled() const
{
return m_traits->IsCancelled();
}
void UpdateState()
{
m_traits->UpdateState();
}
void SetIsDebugging(bool isDebugging)
{
m_traits->m_IsDebugging = isDebugging;
}
char const * ResName() const
{
return m_traits->ResName();
}
};
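
For reference, a hypothetical composition of the removed pool classes; BufferFactory and the element type below are made up for illustration (the real factories lived in the old graphics code, e.g. the StorageFactory mentioned above):

#include "base/resource_pool.hpp"
#include "std/vector.hpp"

struct BufferFactory : BasePoolElemFactory
{
  BufferFactory() : BasePoolElemFactory("buffers", 1024 /* elem size */, 8 /* batch size */) {}
  vector<char> Create() const { return vector<char>(1024); }
};

void Example()
{
  typedef BasePoolTraits<vector<char>, BufferFactory> TBase;
  typedef AllocateOnDemandMultiThreadedPoolTraits<BufferFactory, TBase> TTraits;

  // ResourcePoolImpl takes ownership of the traits object.
  ResourcePool<vector<char> > * pool = new ResourcePoolImpl<TTraits>(new TTraits(BufferFactory(), 0));

  vector<char> const buffer = pool->Reserve();  // allocates a whole batch on demand when the pool is empty
  pool->Free(buffer);

  delete pool;
}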

View file

@@ -1,23 +0,0 @@
#include "base/runner.hpp"
#include "base/logging.hpp"
#include "base/exception.hpp"
void threads::IRunner::CallAndCatchAll(RunnerFuncT const & f)
{
try
{
f();
}
catch (RootException & e)
{
LOG(LERROR, ("Exception caught by runner", e.Msg()));
}
catch (::std::exception & e)
{
LOG(LERROR, ("Std exception caught by runner", e.what()));
}
catch (...)
{
LOG(LERROR, ("Unknown exception caught by runner"));
}
}

View file

@@ -1,44 +0,0 @@
#pragma once
#include "std/function.hpp"
namespace threads
{
typedef function<void()> RunnerFuncT;
// Base Runner interface: performs the given tasks.
class IRunner
{
public:
virtual ~IRunner() {}
virtual void Run(RunnerFuncT const & f) const = 0;
// Helper function that calls f() and catches all exceptions.
static void CallAndCatchAll(RunnerFuncT const & f);
protected:
// Waits until all running threads stop.
// Not for public use! Used in unit tests only, since
// some runners use global thread pool and interfere with each other.
virtual void Join() = 0;
};
// Synchronous implementation: just immediately executes given tasks.
class SimpleRunner : public IRunner
{
public:
virtual void Run(RunnerFuncT const & f) const
{
IRunner::CallAndCatchAll(f);
}
protected:
virtual void Join()
{
}
};
}
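
For reference, a minimal usage sketch of the removed runner interface (the task body is illustrative):

#include "base/runner.hpp"

void Example()
{
  threads::SimpleRunner runner;

  // SimpleRunner executes the task synchronously on the calling thread; CallAndCatchAll
  // logs RootException, std::exception and unknown exceptions instead of rethrowing them.
  runner.Run([] { /* task body */ });
}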

View file

@@ -11,7 +11,6 @@
#include "geometry/point2d.hpp"
#include "geometry/polyline2d.hpp"
#include "base/deferred_task.hpp"
#include "base/mutex.hpp"
#include "std/atomic.hpp"