GPU621/NoName

OpenMP vs C++11 Threads
std::cin.get(c);
}
 
====Question & Answer====
Can one safely use C++11 multi-threading as well as OpenMP in the same program without
interleaving them (i.e. no OpenMP statements in any code passed to C++11 concurrency features, and no
C++11 concurrency in threads spawned by OpenMP)?
 
 
On some platforms an efficient implementation is only possible if the OpenMP runtime is the
only one in control of the process threads. There are also certain aspects of OpenMP that might
not play well with other threading constructs, for example the limit on the number of threads set
by OMP_THREAD_LIMIT when forking two or more concurrent parallel regions.

Since the OpenMP standard itself does not strictly forbid using other threading paradigms, but
neither does it standardise interoperability with them, supporting such functionality is left to the
implementers. This means that some implementations might provide safe concurrent execution of
top-level OpenMP regions and some might not. The x86 implementers pledge to support it, perhaps
because most of them are also proponents of other execution models (e.g. Intel with Cilk and TBB,
GCC with C++11, etc.) and x86 is usually considered an "experimental" platform (other vendors are
usually much more conservative).
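
As an illustration of the pattern the question describes (this is a minimal sketch, not code from the original article, and the worker function name is made up), each threading model stays at its own top level: C++11 threads on the outside, OpenMP regions only inside the worker.

#include <iostream>
#include <thread>
#include <omp.h>

// Each worker contains its own top-level OpenMP parallel region;
// no OpenMP construct appears inside C++11 synchronization code and
// no std::thread is created from inside an OpenMP region.
void omp_worker(int id)
{
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < 1000000; i++)
        sum += 1.0 / (i + 1);
    std::cout << "worker " << id << " sum = " << sum << '\n';
}

int main()
{
    // Two C++11 threads, each entering a separate OpenMP region.
    // Whether these regions may run concurrently, and how many OpenMP
    // threads each one receives (e.g. under OMP_THREAD_LIMIT), is up to
    // the implementation, as the answer above explains.
    std::thread t1(omp_worker, 1);
    std::thread t2(omp_worker, 2);
    t1.join();
    t2.join();
    return 0;
}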
 
 
====OpenMP code====
// Workshop 3: scan and reduce implemented with OpenMP
 
template <typename T, typename R, typename C, typename S>
int scan(
    const T* in,   // source data
    T* out,        // output data
    int size,      // size of source, output data sets
    R reduce,      // reduction expression
    C combine,     // combine expression
    S scan_fn,     // scan function (exclusive or inclusive)
    T initial      // initial value
    )
{
    int nthreads = 1;
    if (size > 0) {
        // per-tile storage for the reduction and scan results
        int max_threads = omp_get_max_threads();
        T* reduced = new T[max_threads];
        T* scanRes = new T[max_threads];

        #pragma omp parallel
        {
            int ntiles = omp_get_num_threads();  // number of tiles
            int itile = omp_get_thread_num();    // this thread's tile
            int tile_size = (size - 1) / ntiles + 1;
            int last_tile = ntiles - 1;
            int last_tile_size = size - last_tile * tile_size;
            if (itile == 0)
                nthreads = ntiles;

            // step 1 - each thread reduces its own tile
            reduced[itile] = reduce(in + itile * tile_size,
                itile == last_tile ? last_tile_size : tile_size, combine, T(0));
            #pragma omp barrier

            // step 2 - one thread performs an exclusive scan on the per-tile
            // reductions and stores the results in scanRes[]
            #pragma omp single
            excl_scan(reduced, scanRes, ntiles, combine, T(0));

            // step 3 - each thread scans its own tile using scanRes[] as the
            // starting value (implicit barrier at the end of the single above)
            scan_fn(in + itile * tile_size, out + itile * tile_size,
                itile == last_tile ? last_tile_size : tile_size, combine,
                scanRes[itile]);
        }
        delete[] reduced;
        delete[] scanRes;
    }
    return nthreads;
}
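
The serial helpers that the template relies on are not shown on this page, so the sketch below is an assumption modelled on how scan() calls them: serial_reduce, incl_scan and excl_scan are illustrative names, and with GCC or Clang the excl_scan declaration has to appear before the scan template so the unqualified call inside it resolves.

#include <iostream>

// serial reduction: fold in[0..n-1] into initial using combine
template <typename T, typename C>
T serial_reduce(const T* in, int n, C combine, T initial) {
    for (int i = 0; i < n; i++)
        initial = combine(initial, in[i]);
    return initial;
}

// serial inclusive scan: out[i] = initial (+) in[0] (+) ... (+) in[i]
template <typename T, typename C>
void incl_scan(const T* in, T* out, int n, C combine, T initial) {
    for (int i = 0; i < n; i++) {
        initial = combine(initial, in[i]);
        out[i] = initial;
    }
}

// serial exclusive scan: out[i] = initial (+) in[0] (+) ... (+) in[i-1]
template <typename T, typename C>
void excl_scan(const T* in, T* out, int n, C combine, T initial) {
    for (int i = 0; i < n; i++) {
        out[i] = initial;
        initial = combine(initial, in[i]);
    }
}

int add(int a, int b) { return a + b; }

int main() {
    const int n = 10;
    int in[n], out[n];
    for (int i = 0; i < n; i++) in[i] = i + 1;   // 1, 2, ..., 10

    // inclusive prefix sums of 1..10 -> 1, 3, 6, ..., 55
    int nthreads = scan(in, out, n,
                        serial_reduce<int, int (*)(int, int)>,
                        add,
                        incl_scan<int, int (*)(int, int)>,
                        0);

    std::cout << "threads used: " << nthreads << "\n";
    for (int i = 0; i < n; i++) std::cout << out[i] << " ";
    std::cout << "\n";
    return 0;
}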
 
====C++11 code====
 
#include <iostream>
#include <omp.h>
#include <chrono>
#include <vector>
#include <thread>

using namespace std;

void doNothing() {}

// Repeatedly spawn 16 units of work with either C++11 threads (1)
// or an OpenMP parallel for (2) and return the elapsed time in seconds.
double run(int algorithmToRun)
{
    auto startTime = std::chrono::system_clock::now();

    for (int j = 1; j < 100000; ++j)
    {
        if (algorithmToRun == 1)
        {
            // create and join 16 C++11 threads on every iteration
            vector<thread> threads;
            for (int i = 0; i < 16; i++)
            {
                threads.push_back(thread(doNothing));
            }
            for (auto& t : threads) t.join();
        }
        else if (algorithmToRun == 2)
        {
            // let the OpenMP runtime distribute the 16 calls over its threads
            #pragma omp parallel for num_threads(16)
            for (int i = 0; i < 16; i++)
            {
                doNothing();
            }
        }
    }

    auto endTime = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds = endTime - startTime;

    return elapsed_seconds.count();
}

int main()
{
    double cppt = run(1);  // C++11 threads
    double ompt = run(2);  // OpenMP

    cout << "C++11 threads: " << cppt << " s" << endl;
    cout << "OpenMP:        " << ompt << " s" << endl;

    return 0;
}
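
Both listings need OpenMP support enabled at compile time, for example with GCC something along the lines of g++ -std=c++11 -pthread -fopenmp main.cpp (with MSVC, the /openmp switch). In the first variant every outer iteration creates and joins 16 fresh OS threads, while typical OpenMP runtimes keep a pool of worker threads alive between parallel regions, which is the kind of overhead difference this comparison exercises.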