1. The thread pool below is a modified version of the GitHub thread pool. The requirement was to reduce race conditions between threads by having each thread handle only its own specialized kind of work (mainly because my early locking of the shared data was incomplete; to cut down on race conditions, each thread simply processes the tasks in its own queue in serialized order). A short usage sketch follows the header.
#ifndef THREAD_POOL_H
#define THREAD_POOL_H
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
#include <atomic>    // std::atomic<bool> shutdown flag
#include <iostream>  // std::cout in the worker's terminate message
//example
//enum
//{
// WII = 0, HMD = 1, GLOVE = 2, NUMMAX
//};
//ThreadPool tp(NUMMAX);
//
//tp.enqueue(WII, []()
//{
//std::this_thread::sleep_for(std::chrono::seconds(1));
//std::cout << std::this_thread::get_id() << ":WII get recived Message\n";
//});
//
//tp.enqueue(GLOVE, []()
//{
//std::this_thread::sleep_for(std::chrono::seconds(1));
//std::cout << std::this_thread::get_id() << ":GLOVE get recived Message\n";
//});
//
//tp.enqueue(HMD, []()
//{
//std::this_thread::sleep_for(std::chrono::seconds(1));
//std::cout << std::this_thread::get_id() << ":HMD get recived Message\n";
//});
class ThreadPool
{
public:
ThreadPool(size_t);
template<class F, class... Args>
auto enqueue(unsigned int threadid, F &&f, Args &&... args)
-> std::future<typename std::result_of<F(Args...)>::type>;
~ThreadPool();
private:
// need to keep track of threads so we can join them
std::vector<std::thread> workers;
// the task queue
std::vector<std::queue<std::function<void()>>> vec_queue_tasks;
// synchronization
std::vector<std::mutex *> vec_queue_mutex;
std::vector<std::condition_variable *> vec_queue_condition;
// per-queue stop flags; vector<char> rather than vector<bool> so each flag
// is a distinct memory location that its own queue mutex can protect
std::vector<char> stops;
// global shutdown flag polled by every worker loop
std::atomic<bool> terminateThreadControl;
};
// the constructor just launches some amount of workers
inline ThreadPool::ThreadPool(size_t threads) : terminateThreadControl(false)
{
vec_queue_tasks.resize(threads);
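// one mutex / condition_variable pair per queue, so each worker only ever synchronizes on its own queue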
for (size_t index = 0; index < threads; ++index)
{
vec_queue_mutex.push_back(new std::mutex);
vec_queue_condition.push_back(new std::condition_variable());
}
stops.resize(threads, false);
for (size_t i = 0; i < threads; ++i)
workers.emplace_back(
[this, i]
{
for (; !terminateThreadControl.load();)
{
std::function<void()> task;
{
std::unique_lock<std::mutex> lock(*(this->vec_queue_mutex[i]));
this->vec_queue_condition[i]->wait(lock,
[this, i]
{
return this->stops[i] ||
!this->vec_queue_tasks[i].empty();
});
if (this->stops[i] && this->vec_queue_tasks[i].empty())
return;
task = std::move(this->vec_queue_tasks[i].front());
this->vec_queue_tasks[i].pop();
}
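// run the task outside the lock so new work can be enqueued to this queue meanwhile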
task();
}
// loop exited because terminateThreadControl was set during shutdown
std::cout << std::this_thread::get_id() << ": terminate\n";
}
);
}
// add new work item to the pool
template<class F, class... Args>
auto ThreadPool::enqueue(unsigned int threadid, F &&f, Args &&... args)
-> std::future<typename std::result_of<F(Args...)>::type>
{
using return_type = typename std::result_of<F(Args...)>::type;
// invalid thread id: return an empty (invalid) future instead of enqueueing
if (threadid >= vec_queue_tasks.size()) return std::future<return_type>();
auto task = std::make_shared<std::packaged_task<return_type()> >(
std::bind(std::forward<F>(f), std::forward<Args>(args)...)
);
std::future<return_type> res = task->get_future();
{
std::unique_lock<std::mutex> lock(*(vec_queue_mutex[threadid]));
// don't allow enqueueing after stopping the pool
if (stops[threadid])
throw std::runtime_error("enqueue on stopped ThreadPool");
vec_queue_tasks[threadid].emplace([task]()
{ (*task)(); });
}
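// notify after releasing the queue lock so the woken worker does not immediately block on it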
vec_queue_condition[threadid]->notify_one();
return res;
}
// the destructor stops each queue, wakes its worker, then joins and cleans up
inline ThreadPool::~ThreadPool()
{
    terminateThreadControl.store(true);
    for (size_t index = 0; index < workers.size(); index++)
    {
        {
            // set the per-queue stop flag under its mutex so the worker's
            // wait predicate observes it, then wake that worker up
            std::unique_lock<std::mutex> lock(*(vec_queue_mutex[index]));
            stops[index] = true;
        }
        vec_queue_condition[index]->notify_all();
        if (workers[index].joinable())
            workers[index].join();
        delete vec_queue_mutex[index];
        delete vec_queue_condition[index];
    }
}
#endif
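A minimal usage sketch, assuming the header above is saved as thread_pool.h and reusing the WII / HMD / GLOVE enum from the example comment: enqueue returns a std::future, so the caller can route tasks to a specific worker and still wait for their results.

// usage sketch (assumed file name: thread_pool.h)
#include "thread_pool.h"
#include <chrono>
#include <iostream>

enum
{
    WII = 0, HMD = 1, GLOVE = 2, NUMMAX
};

int main()
{
    ThreadPool tp(NUMMAX);

    // tasks with the same id go to the same worker and run in FIFO order;
    // tasks with different ids run on different workers in parallel
    std::future<int> wii_result = tp.enqueue(WII, [](int x)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        return x * 2;
    }, 21);

    std::future<void> wii_followup = tp.enqueue(WII, []()
    {
        std::cout << std::this_thread::get_id() << ": second WII task\n";
    });

    std::future<void> hmd_task = tp.enqueue(HMD, []()
    {
        std::cout << std::this_thread::get_id() << ": HMD task\n";
    });

    std::cout << "WII result: " << wii_result.get() << "\n"; // prints 42
    wii_followup.get();
    hmd_task.get();
    return 0; // ~ThreadPool stops every queue and joins its workers
}

Because every id maps to one worker, the two WII tasks above are serialized on the same thread, which is exactly the property that removes the races between the device handlers.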