Problem with gcc4.7 and call_once
David Barto
DBarto at visionpro.com
Wed Aug 7 09:22:05 PDT 2013
Same results with gcc 4.8 +universal
649_ rm threading ; make threading
/opt/local/bin/g++-mp-4.8 -std=c++11 -g threading.cpp -o threading
650_ ./threading
Segmentation fault: 11
Though I have not made any changes to libstdc++; I just updated to the latest version of the port on Monday.
David
On Aug 7, 2013, at 9:09 AM, David E Barto <dbarto at visionpro.com> wrote:
>
> On Aug 7, 2013, at 8:44 AM, Jeremy Huddleston Sequoia <jeremyhu at apple.com> wrote:
>
>> Can you provide a reproducible test case?
>>
>
> Compile line is:
> /opt/local/bin/g++-mp-4.7 -std=c++11 -g threading.cpp -o threading
>
>
> The following is the result of the execution of the code.
>
> Program received signal EXC_BAD_ACCESS, Could not access memory.
> Reason: 13 at address: 0x0000000000000000
> [Switching to process 36254 thread 0x1203]
> 0x00000001000d1b20 in __once_proxy ()
> (gdb)
>
> The code follows.
> With the exception of the changes for GCC 4.7 and a 'main' at the end, this is the thread-pool library as posted at:
> https://github.com/progschj/ThreadPool
> I'm using the example code shown on the GitHub page as the 'main' here.
>
> threading.cpp
>
> #include <vector>
> #include <queue>
> #include <memory>
> #include <thread>
> #include <mutex>
> #include <condition_variable>
> #include <future>
> #include <functional>
> #include <stdexcept>
>
> typedef std::thread worker_t;
>
> // A fixed-size pool of worker threads consuming a queue of type-erased
> // tasks.  Work is submitted through enqueue(), which returns a future
> // for the task's result.
> class ThreadPool {
> public:
> // Launches 'threads' workers that immediately start waiting for tasks.
> ThreadPool(size_t threads);
> // NOTE(review): this condition is true for ALL GCC 4.x (including 4.8/4.9)
> // and for GCC 5.0-5.7, not just for GCC < 4.8 as the comment placement
> // suggests -- presumably intended as "GCC < 4.8"; confirm before changing,
> // because main() below calls enqueue<int>(...), which only matches this
> // explicit-result-type overload.
> #if (__GNUC__ <= 4) || (__GNUC_MINOR__ < 8)
> //
> // By default thread pools run at a lower priority
> //
> // Pre-4.8 variant: the result type T must be supplied explicitly by the
> // caller instead of being deduced via std::result_of.
> template<class T, class F, class... Args>
> std::future<T> enqueue(F&& f, Args&&... args);
> #else
> // Deduces the result type of invoking f with args... .
> template<class F, class... Args>
> auto enqueue(F&& f, Args&&... args)
> -> std::future<typename std::result_of<F(Args...)>::type>;
> #endif
> // Wakes all workers, asks them to drain the queue, and joins them.
> ~ThreadPool();
> private:
> // need to keep track of threads so we can join them
> std::vector< worker_t > workers;
> // the task queue
> std::queue< std::function<void()> > tasks;
>
> // synchronization
> // queue_mutex guards both 'tasks' and 'stop'; 'condition' signals
> // queue-not-empty or shutdown.
> std::mutex queue_mutex;
> std::condition_variable condition;
> // Set (under queue_mutex) by the destructor to tell workers to exit.
> bool stop;
> };
>
> // the constructor just launches some amount of workers
> // Each worker loops forever: wait for a task (or shutdown), pop one task
> // under the lock, release the lock, run the task.
> inline ThreadPool::ThreadPool(size_t threads) : stop(false)
> {
> for(size_t i = 0;i<threads;++i)
> {
> workers.emplace_back(
> [this]
> {
> while(true)
> {
> std::unique_lock<std::mutex> lock(this->queue_mutex);
> // Wait until there is work or the pool is shutting down; the
> // explicit loop (rather than the predicate overload of wait)
> // also guards against spurious wakeups.
> while(!this->stop && this->tasks.empty())
> this->condition.wait(lock);
> // Shutdown only once the queue is drained, so tasks enqueued
> // before the destructor ran still execute.
> if(this->stop && this->tasks.empty())
> return;
> std::function<void()> task(this->tasks.front());
> this->tasks.pop();
> // Run the task without holding the lock so other workers can
> // pop tasks concurrently.
> lock.unlock();
> task();
> }
> }
> );
> }
> }
>
> #if (__GNUC__ <= 4) || (__GNUC_MINOR__ < 8)
> // add new work item to the pool (pre-GCC-4.8 variant: the result type T
> // is supplied explicitly by the caller instead of being deduced).
> // f is invoked with args...; the caller retrieves its result through the
> // returned future.  Throws std::runtime_error if the pool has been stopped.
> template<class T, class F, class... Args>
> // coverity[pass_by_value]
> inline std::future<T>
> ThreadPool::enqueue(F&& f, Args&&... args)
> {
>     auto task = std::make_shared< std::packaged_task<T()> >(
>         std::bind(std::forward<F>(f), std::forward<Args>(args)...)
>     );
>
>     std::future<T> res = task->get_future();
>     {
>         std::unique_lock<std::mutex> lock(queue_mutex);
>
>         // 'stop' is written by the destructor while it holds queue_mutex,
>         // so it must also be read under the lock: the original checked it
>         // before locking, which is a data race (and left a window between
>         // the check and the push).  Don't allow enqueueing after stopping
>         // the pool.
>         if(stop)
>             throw std::runtime_error("enqueue on stopped ThreadPool");
>
>         tasks.push([task](){ (*task)(); });
>     }
>     condition.notify_one();
>     return res;
> }
>
> #else
> // add new work item to the pool; the result type is deduced from invoking
> // f with args... .  The caller retrieves the result through the returned
> // future.  Throws std::runtime_error if the pool has been stopped.
> template<class F, class... Args>
> auto ThreadPool::enqueue(F&& f, Args&&... args)
>     -> std::future<typename std::result_of<F(Args...)>::type>
> {
>     typedef typename std::result_of<F(Args...)>::type return_type;
>
>     auto task = std::make_shared< std::packaged_task<return_type()> >(
>         std::bind(std::forward<F>(f), std::forward<Args>(args)...)
>     );
>
>     std::future<return_type> res = task->get_future();
>     {
>         std::unique_lock<std::mutex> lock(queue_mutex);
>
>         // 'stop' is written by the destructor while it holds queue_mutex,
>         // so it must also be read under the lock: the original checked it
>         // before locking, which is a data race (and left a window between
>         // the check and the push).  Don't allow enqueueing after stopping
>         // the pool.
>         if(stop)
>             throw std::runtime_error("enqueue on stopped ThreadPool");
>
>         tasks.push([task](){ (*task)(); });
>     }
>     condition.notify_one();
>     return res;
> }
> #endif
>
> // the destructor raises the stop flag (under the lock, so waiting workers
> // can't miss it), wakes every worker, and then waits for each to finish.
> inline ThreadPool::~ThreadPool()
> {
>     {
>         std::unique_lock<std::mutex> lock(queue_mutex);
>         stop = true;
>     }
>     condition.notify_all();
>     for(worker_t &worker : workers)
>         worker.join();
> }
>
> #include <iostream>
>
> int
> main(int argc, char *argv[])
> {
>     // spin up a pool of four worker threads
>     ThreadPool pool(4);
>
>     // hand the pool a trivial identity job; keep the future for its result
>     auto answer = pool.enqueue<int>([](int value) { return value; }, 42);
>
>     // block until the job has run, then print what it produced
>     std::cout << answer.get() << std::endl;
>
> }
>
More information about the macports-users
mailing list