{{GPU610/DPS915 Index | 2017120191}}
= Algo Holics =
== Team Members ==
# [mailto:ssdhillon20@myseneca.ca?subject=GPU610 Sukhbeer Dhillon], Simple Backpropagation Neural Network
# [mailto:gsingh520@myseneca.ca?subject=gpu610 Gurpreet Singh], Sudoku Puzzle Solver
# [mailto:egiang1@myseneca.ca?subject=gpu610 Edgar Giang], Merge sort
====Sudoku Puzzle Solver by Gurpreet Singh====
This is a program that solves 9×9 Sudoku puzzles using a brute-force algorithm. The user can either pass a Sudoku file as input or enter the values manually. In either case, the grid must have exactly 9 rows and 9 columns, the cells must be separated by single spaces, and the cells to be solved must contain 0 as their value.
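For example, an input file would look like the following (a well-known sample grid, shown here only to illustrate the format; 0 marks an empty cell):

 5 3 0 0 7 0 0 0 0
 6 0 0 1 9 5 0 0 0
 0 9 8 0 0 0 0 6 0
 8 0 0 0 6 0 0 0 3
 4 0 0 8 0 3 0 0 1
 7 0 0 0 2 0 0 0 6
 0 6 0 0 0 0 2 8 0
 0 0 0 4 1 9 0 0 5
 0 0 0 0 8 0 0 7 9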
'''The call graph for the above execution looks like:'''
{| class="wikitable mw-collapsible mw-collapsed"
! Call Graph
|-
|
Call graph
 granularity: each sample hit covers 2 byte(s) no time propagated

 index  name
 [8]    checkRow(int, int)
 [9]    checkColumn(int, int)
 [10]   placeNum(int, int)
 [11]   checkSquare(int, int, int)
 [18]   __static_initialization_and_destruction_0(int, int)
|}
From the above call graph we can see that the program took virtually no time to find the solution, and that the most calls were made to the checkRow, checkColumn and checkSquare functions. However, to get a better understanding of the program, let's try a harder Sudoku puzzle.
The call graph for the harder puzzle looks like:
{| class="wikitable mw-collapsible mw-collapsed"
! Call Graph
|-
|
Call graph
 [3]  placeNum(int, int)
 [6]  checkSquare(int, int, int)
 [20] __static_initialization_and_destruction_0(int, int)
|}
From the above call graph we can see that, for a harder Sudoku puzzle, the execution time increased significantly. Moreover, almost 50% of the time is consumed by the checkRow function, 18.8% by checkColumn, and 12% by checkSquare. Thousands of calls were made to these three functions, so if we parallelize them, the efficiency of the program can be increased significantly.
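As a rough illustration of that idea, the three checks could be folded into a single kernel in which each of nine threads tests one candidate digit for a given empty cell. This is a minimal sketch only, with hypothetical names (checkCandidates, d_board, d_ok) and a flattened 81-int board; it is not the project's code:

 // Each thread tests one candidate digit (1..9) for cell (row, col).
 __global__ void checkCandidates(const int* board, int row, int col, bool* ok) {
     int num = threadIdx.x + 1;                       // candidate digit 1..9
     if (num > 9) return;
     bool valid = true;
     for (int k = 0; k < 9; ++k) {
         if (board[row * 9 + k] == num) valid = false;    // row check
         if (board[k * 9 + col] == num) valid = false;    // column check
     }
     int r0 = (row / 3) * 3, c0 = (col / 3) * 3;          // 3x3 square check
     for (int r = r0; r < r0 + 3; ++r)
         for (int c = c0; c < c0 + 3; ++c)
             if (board[r * 9 + c] == num) valid = false;
     ok[num - 1] = valid;                             // digit num is legal here
 }
 // launch: checkCandidates<<<1, 9>>>(d_board, row, col, d_ok);

All nine row/column/square checks for a cell then run concurrently instead of sequentially.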
----
==== Simple Artificial Neural Network by Sukhbeer Singh====
=====Introduction=====
This project trains a simple three-layer feed-forward neural network (784-128-64-10) with backpropagation to recognize handwritten digits from the MNIST dataset.
=====Source Code=====
Here is the [https://cognitivedemons.wordpress.com/2018/06/08/neural-network-in-c-part-2-mnist-handwritten-digits-dataset/ source code] used.
I changed the source code to put the network's training in a separate function called train. Also, I used the cblas library to do the matrix-matrix multiplication inside the dot function, instead of using three nested for loops.
{| class="wikitable mw-collapsible mw-collapsed"
! nn.cpp
|-
|
// To compile: g++ -o nn nn.cpp -std=c++11 -lgslcblas
// To run: ./nn
// Created by Sergei Bugrov on 4/20/18.
// Copyright © 2017 Sergei Bugrov. All rights reserved.
// Download dataset from: https://drive.google.com/file/d/1tVyvg6c1Eo5ojtiz0R17YEzcUe5cN285/view
// Updated By - Sukhbeer Singh Dhillon, March 23rd 2019
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <vector>
#include <math.h>
#include <fstream>
#include <sstream>
#include <string>
#include <random>
#include <algorithm>
extern "C" {
#include <gsl/gsl_cblas.h>
}
using namespace std;

// "Couts" the input vector as an n_rows x n_columns matrix and returns
// the predicted digit (index of the largest value) for each row.
vector<float> print ( const vector <float>& m, int n_rows, int n_columns ) {
  vector<float> outputDigits;
  for( int i = 0; i != n_rows; ++i ) {
    float digitPredicted = 0.0;
    int index = 0;
    for( int j = 0; j != n_columns; ++j ) {
      float currentValue = m[i * n_columns + j];
      cout << currentValue << " ";
      if(currentValue > digitPredicted){
        digitPredicted = currentValue;
        index = j;
      }
    }
    outputDigits.push_back(index);
    cout << " --> Digit = " << index << "\n";
  }
  cout << endl;
  return outputDigits;
}

int argmax ( const vector <float>& m ) {
  return distance(m.begin(), max_element(m.begin(), m.end()));
}

vector <float> relu(const vector <float>& z){
  int size = z.size();
  vector <float> output;
  for( int i = 0; i < size; ++i ) {
    if (z[i] < 0) output.push_back(0.0);
    else output.push_back(z[i]);
  }
  return output;
}

vector <float> reluPrime (const vector <float>& z) {
  int size = z.size();
  vector <float> output;
  for( int i = 0; i < size; ++i ) {
    if (z[i] <= 0) output.push_back(0.0);
    else output.push_back(1.0);
  }
  return output;
}

static vector<float> random_vector(const int size){
  uniform_real_distribution<> distribution(0.0, 0.05);
  static default_random_engine generator;
  vector<float> data(size);
  generate(data.begin(), data.end(), [&]() { return distribution(generator); });
  return data;
}

// Row-wise softmax over blocks of length dim, shifted by the row max
// for numerical stability.
vector <float> softmax (const vector <float>& z, const int dim) {
  const int zsize = static_cast<int>(z.size());
  vector <float> out;
  for (int i = 0; i != zsize; i += dim) {
    vector <float> foo;
    for (int j = 0; j != dim; ++j) foo.push_back(z[i + j]);
    float max_foo = *max_element(foo.begin(), foo.end());
    for (int j = 0; j != dim; ++j) foo[j] = exp(foo[j] - max_foo);
    float sum_of_elems = 0.0;
    for (int j = 0; j != dim; ++j) sum_of_elems += foo[j];
    for (int j = 0; j != dim; ++j) out.push_back(foo[j] / sum_of_elems);
  }
  return out;
}

// Derivative of the sigmoid: f'(x) = f(x)(1 - f(x)), i.e. x(1 - x)
// for every element of m1.
vector <float> sigmoid_d (const vector <float>& m1) {
  const unsigned long VECTOR_SIZE = m1.size();
  vector <float> output (VECTOR_SIZE);
  for( unsigned i = 0; i != VECTOR_SIZE; ++i )
    output[ i ] = m1[ i ] * (1 - m1[ i ]);
  return output;
}

// Sigmoid: 1/(1 + e^-x) for every element of m1.
vector <float> sigmoid (const vector <float>& m1) {
  const unsigned long VECTOR_SIZE = m1.size();
  vector <float> output (VECTOR_SIZE);
  for( unsigned i = 0; i != VECTOR_SIZE; ++i )
    output[ i ] = 1 / (1 + exp(-m1[ i ]));
  return output;
}

// Elementwise vector operations.
vector <float> operator+(const vector <float>& m1, const vector <float>& m2){
  const unsigned long VECTOR_SIZE = m1.size();
  vector <float> sum (VECTOR_SIZE);
  for (unsigned i = 0; i != VECTOR_SIZE; ++i) sum[i] = m1[i] + m2[i];
  return sum;
}

vector <float> operator-(const vector <float>& m1, const vector <float>& m2){
  const unsigned long VECTOR_SIZE = m1.size();
  vector <float> difference (VECTOR_SIZE);
  for (unsigned i = 0; i != VECTOR_SIZE; ++i) difference[i] = m1[i] - m2[i];
  return difference;
}

vector <float> operator*(const vector <float>& m1, const vector <float>& m2){
  const unsigned long VECTOR_SIZE = m1.size();
  vector <float> product (VECTOR_SIZE);
  for (unsigned i = 0; i != VECTOR_SIZE; ++i) product[i] = m1[i] * m2[i];
  return product;
}

vector <float> operator*(const float m1, const vector <float>& m2){
  const unsigned long VECTOR_SIZE = m2.size();
  vector <float> product (VECTOR_SIZE);
  for (unsigned i = 0; i != VECTOR_SIZE; ++i) product[i] = m1 * m2[i];
  return product;
}

vector <float> operator/(const vector <float>& m2, const float m1){
  const unsigned long VECTOR_SIZE = m2.size();
  vector <float> product (VECTOR_SIZE);
  for (unsigned i = 0; i != VECTOR_SIZE; ++i) product[i] = m2[i] / m1;
  return product;
}

// Returns the transpose of the C x R matrix m.
vector <float> transpose (float *m, const int C, const int R) {
  vector <float> mT (C*R);
  for(int n = 0; n != C*R; n++) {
    int i = n/C;
    int j = n%C;
    mT[n] = m[R*j + i];
  }
  return mT;
}

// Matrix product m1 (m1_rows x m1_columns) * m2 (m1_columns x m2_columns),
// computed with cblas_sgemm instead of three nested for loops.
vector <float> dot (const vector <float>& m1, const vector <float>& m2,
                    const int m1_rows, const int m1_columns, const int m2_columns) {
  vector <float> output (m1_rows*m2_columns);
  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              m1_rows, m2_columns, m1_columns, 1.0,
              m1.data(), m1_columns, m2.data(), m2_columns,
              0.0, output.data(), m2_columns);
  return output;
}

vector<string> split(const string &s, char delim) {
  stringstream ss(s);
  string item;
  vector<string> tokens;
  while (getline(ss, item, delim)) tokens.push_back(item);
  return tokens;
}

// Prints predictions next to the ground truth and reports the accuracy
// over the ten displayed rows.
void displayPrediction(vector<float> y_prediction, vector<float> y_output){
  cout << "Predictions:" << "\n";
  vector<float> predict = print ( y_prediction, 10, 10 );
  cout << "Ground truth:" << "\n";
  vector<float> output = print ( y_output, 10, 10 );
  int accuracy = 0;
  for(unsigned i = 0; i < output.size(); i++){
    if(predict[i] == output[i]) accuracy++;
  }
  cout << " Accuracy = " << accuracy << "/" << output.size() << std::endl;
}

// Train the model: build a random mini-batch, feed forward, backpropagate,
// adjust the weights, and display epoch data every 1000 iterations.
// @params: x_input - training data, y_output - labels,
//          weight1..weight3 - layer weights, updated in place
void train(vector<float> x_input, vector<float> y_output, vector<float>& weight1,
           vector<float>& weight2, vector<float>& weight3){
  int BATCH_SIZE = 256;
  float lr = .01/BATCH_SIZE;
  for(unsigned i = 0; i < 10000; i++){
    // Building batches of input variables (X) and labels (y)
    int randindx = rand() % (42000-BATCH_SIZE);
    vector<float> b_X;
    vector<float> b_y;
    for (int j = randindx*784; j < (randindx+BATCH_SIZE)*784; ++j) b_X.push_back(x_input[j]);
    for (int k = randindx*10; k < (randindx+BATCH_SIZE)*10; ++k) b_y.push_back(y_output[k]);
    // Feed forward
    vector<float> a1 = relu(dot( b_X, weight1, BATCH_SIZE, 784, 128 ));
    vector<float> a2 = relu(dot( a1, weight2, BATCH_SIZE, 128, 64 ));
    vector<float> yhat = softmax(dot( a2, weight3, BATCH_SIZE, 64, 10 ), 10);
    // Back propagation
    vector<float> dyhat = (yhat - b_y);
    // dW3 = a2.T * dyhat
    vector<float> dW3 = dot(transpose( &a2[0], BATCH_SIZE, 64 ), dyhat, 64, BATCH_SIZE, 10);
    // dz2 = dyhat * W3.T * relu'(a2)
    vector<float> dz2 = dot(dyhat, transpose( &weight3[0], 64, 10 ), BATCH_SIZE, 10, 64) * reluPrime(a2);
    // dW2 = a1.T * dz2
    vector<float> dW2 = dot(transpose( &a1[0], BATCH_SIZE, 128 ), dz2, 128, BATCH_SIZE, 64);
    // dz1 = dz2 * W2.T * relu'(a1)
    vector<float> dz1 = dot(dz2, transpose( &weight2[0], 128, 64 ), BATCH_SIZE, 64, 128) * reluPrime(a1);
    // dW1 = X.T * dz1
    vector<float> dW1 = dot(transpose( &b_X[0], BATCH_SIZE, 784 ), dz1, 784, BATCH_SIZE, 128);
    // Updating the parameters
    weight3 = weight3 - lr * dW3;
    weight2 = weight2 - lr * dW2;
    weight1 = weight1 - lr * dW1;
    if ((i+1) % 1000 == 0){
      cout << "-----------------------------------------------Epoch " << i+1 << "--------------------------------------------------" << "\n";
      displayPrediction(yhat, b_y);   // compare the batch predictions with the batch labels
      vector<float> loss_m = yhat - b_y;
      float loss = 0.0;
      for (int k = 0; k < BATCH_SIZE*10; ++k) loss += loss_m[k] * loss_m[k];
      cout << " Loss " << loss/BATCH_SIZE << "\n";
      cout << "--------------------------------------------End of Epoch :(------------------------------------------------" << "\n";
    }
  }
}

int main(int argc, const char * argv[]) {
  string line;
  vector<string> line_v;
  cout << "Loading data ...\n";
  vector<float> X_train;
  vector<float> y_train;
  ifstream myfile ("train.txt");
  if (myfile.is_open()) {
    while ( getline(myfile, line) ) {
      line_v = split(line, '\t');
      // the first field is the digit label, one-hot encoded into y_train
      int digit = strtof((line_v[0]).c_str(),0);
      for (int i = 0; i < 10; ++i) {
        if (i == digit) y_train.push_back(1.);
        else y_train.push_back(0.);
      }
      // the remaining fields are the 784 pixel values
      int size = static_cast<int>(line_v.size());
      for (int i = 1; i < size; ++i) X_train.push_back(strtof((line_v[i]).c_str(),0));
    }
    X_train = X_train/255.0;  // scale pixels to [0, 1]
    myfile.close();
  }
  else cout << "Unable to open file" << '\n';
  // Random initialization of the weights
  vector <float> W1 = random_vector(784*128);
  vector <float> W2 = random_vector(128*64);
  vector <float> W3 = random_vector(64*10);
  cout << "Training the model ...\n";
  train(X_train, y_train, W1, W2, W3);
  return 0;
}
|}
The result given below compares the predictions made by the trained network with the actual output after 10000 iterations. The ground truth is the one-hot encoding of the label digits 0-9 (1.0 at the position of the correct digit). As you can see, the accuracy of the network is not that great.
 -----------------------------------------------Epoch 10000--------------------------------------------------
 Predictions:   (ten rows of softmax outputs and the digit each row predicts; matrix trimmed)
 Ground truth:  (ten rows of one-hot labels and the digit each row encodes; matrix trimmed)
  Accuracy = 2/10
 --------------------------------------------End of Epoch :(------------------------------------------------
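For reference, the weight updates coded in train() are the standard backpropagation equations for this 784-128-64-10 network, with <math>\odot</math> denoting elementwise multiplication and <math>B</math> the mini-batch:

<math>\delta_3 = \hat{y} - y, \qquad \nabla W_3 = a_2^{\top}\,\delta_3</math>

<math>\delta_2 = (\delta_3 W_3^{\top}) \odot \mathrm{relu}'(a_2), \qquad \nabla W_2 = a_1^{\top}\,\delta_2</math>

<math>\delta_1 = (\delta_2 W_2^{\top}) \odot \mathrm{relu}'(a_1), \qquad \nabla W_1 = X^{\top}\,\delta_1</math>

<math>W_k \leftarrow W_k - \frac{0.01}{|B|}\,\nabla W_k</math>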
=====Profiling=====
The program was profiled with gprof. The call graph below is trimmed to the entries that could be recovered; dot dominates the run time.
{| class="wikitable mw-collapsible mw-collapsed"
! Call Graph
|-
|
 granularity: each sample hit covers 2 byte(s) for 0.00% of 1084.68 seconds

 index  % time    self  children    called      name
                                                <spontaneous>
 [1]      97.9  1061.73    0.00                 dot(std::vector<float, std::allocator<float> > const&, std::vector<float, std::allocator<float> > const&, int, int, int) [1]
 -----------------------------------------------
                                                <spontaneous>
 [2]       1.4    15.23    0.00                 transpose(float*, int, int) [2]
 -----------------------------------------------
                                                <spontaneous>
 [4]       0.1     0.56    0.97                 reluPrime(std::vector<float, std::allocator<float> > const&) [4]
                   0.82    0.00 491520000/519195026  void std::vector<float, std::allocator<float> >::emplace_back<float>(float&&) [7]
                   0.15    0.00    310000/442679     void std::vector<float, std::allocator<float> >::_M_emplace_back_aux<float>(float&&) [11]
 -----------------------------------------------
 [7]       0.1     0.87    0.00 519195026            void std::vector<float, std::allocator<float> >::emplace_back<float>(float&&) [7]
 -----------------------------------------------
 [10]      0.0     0.10    0.13                 split(std::string const&, char) [10]
                   0.13    0.00    462000/462000     void std::vector<std::string, std::allocator<std::string> >::_M_emplace_back_aux<std::string const&>(std::string const&) [14]
 -----------------------------------------------
 (relu, softmax, operator-, operator*, operator/ and the remaining std::vector helpers each account for 0.1% or less of the time)
|}
=====Analysis=====
The total execution time of the program is dominated by ''dot'': as the profiling results show, it takes up about 98% of the run because it performs all of the matrix-matrix multiplications. Those multiplications happen inside train(), which makes train() the hotspot of the program. If train() is made into a kernel, with the functions it calls (dot, relu, reluPrime, softmax and the operator overloads) as device functions, the program should speed up by a good proportion.
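As a first step toward that kernel, the matrix product inside train() could be expressed in CUDA roughly like this. This is a sketch only; the kernel name and launch configuration are assumptions, and the project's eventual kernel may differ:

 // One thread per element of C = A (n x k) * B (k x m), all row-major.
 __global__ void dotKernel(const float* A, const float* B, float* C,
                           int n, int k, int m) {
     int row = blockIdx.y * blockDim.y + threadIdx.y;
     int col = blockIdx.x * blockDim.x + threadIdx.x;
     if (row < n && col < m) {
         float sum = 0.0f;
         for (int i = 0; i < k; ++i)
             sum += A[row * k + i] * B[i * m + col];
         C[row * m + col] = sum;
     }
 }
 // launch: dim3 block(16, 16);
 //         dim3 grid((m + 15) / 16, (n + 15) / 16);
 //         dotKernel<<<grid, block>>>(d_A, d_B, d_C, n, k, m);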
----
==== Sorting Algorithms - Merge Sort - Edgar Giang====
=====Intro=====
=====How to run the program=====
The following command was tested on the matrix server (connected via PuTTY):
g++ fileName.cpp -pg -o test
=====Running the program=====
For this test the input size was 200,000 elements. The time indicated in the output is in milliseconds, so the sort took 166,100 milliseconds (about 166 seconds) to complete. A much larger input would result in the program running for a very long time.
=====Profiling=====
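Since the program is compiled with -pg, running it writes a gmon.out file to the working directory, and gprof turns that into the flat profile shown below:

 ./test
 gprof ./test gmon.out > profile.txt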
{| class="wikitable mw-collapsible mw-collapsed"
=====Analysis=====
21.90 0.07 0.07 600021 0.00 0.00 int* std::__copy_move<false, true, std::random_access_iterator_tag>::__copy_m<int>(int const*, int const*, int*)
From the flat profile,the hotspots for this program would be the function merge function whih was called 200,006 times and thee int* std::__copy__move. The merge function would take up 52.3% of the time
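For context, a classic top-down merge sort has the following shape. This is a generic sketch, not necessarily identical to the profiled implementation; the std::copy call is where helpers such as std::__copy_m enter the picture:

 #include <algorithm>
 #include <vector>

 // Merge the sorted halves a[lo..mid) and a[mid..hi) using tmp as scratch.
 void merge(std::vector<int>& a, std::vector<int>& tmp, int lo, int mid, int hi) {
     std::copy(a.begin() + lo, a.begin() + hi, tmp.begin() + lo); // bulk copy, shows up as __copy_m
     int i = lo, j = mid;
     for (int k = lo; k < hi; ++k)
         a[k] = (j >= hi || (i < mid && tmp[i] <= tmp[j])) ? tmp[i++] : tmp[j++];
 }

 void mergeSort(std::vector<int>& a, std::vector<int>& tmp, int lo, int hi) {
     if (hi - lo < 2) return;           // one element is already sorted
     int mid = lo + (hi - lo) / 2;
     mergeSort(a, tmp, lo, mid);
     mergeSort(a, tmp, mid, hi);
     merge(a, tmp, lo, mid, hi);
 }

Every level of the recursion copies and merges all n elements, which is why merge and the copy helper together dominate the profile.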
----
=== Assignment 2 ===
Our initial idea was to use the neural network code for our assignment 2. But since the algorithm itself was not very accurate (2/10 correct predictions even after 10,000 training iterations), we decided to parallelize merge sort instead. Soon we realized that since its Big O classification is n log n, offloading the computation to the GPU would not be that effective. So we settled on the cosine transform library, as described below.

====Cosine Transformation====
The Cosine_Transform is a simple C++ library which demonstrates properties of the Discrete Cosine Transform (DCT) for real data. The DCT is used to create JPEGs (compressed images). The formula used here is:

           | √(1/n),                            if u = 0;        0 ≤ v ≤ n-1
  C(u,v) = |
           | √(2/n) · cos[((2v+1)·π·u)/(2n)],   if 1 ≤ u ≤ n-1;  0 ≤ v ≤ n-1

where u is the row index, v is the column index, and n is the total number of elements in a row/column of the computational matrix. This [https://www.youtube.com/watch?v=tW3Hc0Wrgl0 Link] can be used for a better understanding of the above formula.

Here is the [https://people.sc.fsu.edu/~jburkardt/cpp_src/cosine_transform/cosine_transform.html source code] used.

=====Profiling=====
The gprof call graph for the above serial code looks like:
{| class="wikitable mw-collapsible mw-collapsed"
! Call Graph
|-
|
 granularity: each sample hit covers 2 byte(s) for 0.68% of 1.47 seconds

 index % time    self  children    called     name
                                              <spontaneous>
 [1]    100.0    0.00    1.47                 main [1]
                 0.00    1.47       1/1       cosine_transform_test01(int) [3]
 -----------------------------------------------
                 1.47    0.00       1/1       cosine_transform_test01(int) [3]
 [2]    100.0    1.47    0.00       1         cosine_transform_data(int, double*) [2]
 -----------------------------------------------
                 0.00    1.47       1/1       main [1]
 [3]    100.0    0.00    1.47       1         cosine_transform_test01(int) [3]
                 1.47    0.00       1/1       cosine_transform_data(int, double*) [2]
                 0.00    0.00       1/1       r8vec_uniform_01_new(int, int&) [14]
                 0.00    0.00       1/1       reportTime(char const*, std::chrono::duration<long, std::ratio<1l, 1000000000l> >) [13]
 -----------------------------------------------
 (the remaining entries are std::chrono bookkeeping and static-initialization helpers, each with 0.00 seconds of time)
|}
As is evident, the algorithm is O(n²) currently, and virtually all of the time is spent in cosine_transform_data. Using thread indices on the GPU to replace the for loops could potentially improve performance. To increase the efficiency of the program we transformed the '''cosine_transform_data''' function into a kernel named '''cosTransformKernel''', which offloads the compute-intensive calculation to the GPU.

=====Kernel Version 1=====
{| class="wikitable mw-collapsible mw-collapsed"
! Modified Code
|-
|
# include <iostream>
# include <iomanip>
# include <ctime>
# include <chrono>
# include <cstdlib>
# include <cmath>
#include <cuda_runtime.h>
using namespace std;
using namespace std::chrono;
const double pi = 3.141592653589793;
const int ntpb = 1024;
void cosine_transform_test01 ( int size );
double *r8vec_uniform_01_new ( int n, int &seed ){
  int i;
  const int i4_huge = 2147483647;
  int k;
  double *r;
  if ( seed == 0 ){
    cerr << "\n";
    cerr << "R8VEC_UNIFORM_01_NEW - Fatal error!\n";
    cerr << "  Input value of SEED = 0.\n";
    exit ( 1 );
  }
  r = new double[n];
  for ( i = 0; i < n; i++ ){
    k = seed / 127773;
    seed = 16807 * ( seed - k * 127773 ) - k * 2836;
    if ( seed < 0 ){
      seed = seed + i4_huge;
    }
    r[i] = ( double ) ( seed ) * 4.656612875E-10;
  }
  return r;
}
double *cosine_transform_data ( int n, double d[] ){
  double angle;
  double *c;
  int i;
  int j;
  c = new double[n];
  for ( i = 0; i < n; i++ ){
    c[i] = 0.0;
    for ( j = 0; j < n; j++ ){
      angle = pi * ( double ) ( i * ( 2 * j + 1 ) ) / ( double ) ( 2 * n );
      c[i] = c[i] + cos ( angle ) * d[j];
    }
    c[i] = c[i] * sqrt ( 2.0 / ( double ) ( n ) );
  }
  return c;
}
void reportTime(const char* msg, steady_clock::duration span) {
  auto ms = duration_cast<milliseconds>(span);
  std::cout << msg << " - took - " << ms.count() << " millisecs" << std::endl;
}
__global__ void cosTransformKernel(double *a, double *b, int n){
  const double pi = 3.141592653589793;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;   // guard threads beyond the end of the vector
  double sum = 0.0;     // accumulate locally instead of read-modify-write on global memory
  for(int j = 0; j < n; j++){
    double angle = pi * (double) (i*(2*j+1)) / (double)(2*n);
    sum += cos ( angle ) * a[j];
  }
  b[i] = sum * sqrt( 2.0 / (double) n );
}
int main (int argc, char* argv[] ){
  if (argc != 2) {
    std::cerr << argv[0] << ": invalid number of arguments\n";
    std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
    return 1;
  }
  int n = std::atoi(argv[1]);
  cosine_transform_test01 (n);
  return 0;
}
void cosine_transform_test01 ( int size ){
  int n = size;
  int seed;
  double *r;
  double *hs;
  double *s = new double[n];
  double *d_a;
  double *d_b;
  // allocate memory on the device for the randomly generated array
  // and for the array in which transform values will be stored
  cudaMalloc((void**)&d_a, sizeof(double) * n);
  cudaMalloc((void**)&d_b, sizeof(double) * n);
  seed = 123456789;
  r = r8vec_uniform_01_new ( n, seed );
  // copy randomly generated values from host to device
  cudaMemcpy(d_a, r, sizeof(double)*n, cudaMemcpyHostToDevice);
  int nblks = (n + ntpb - 1) / ntpb;
  steady_clock::time_point ts, te;
  ts = steady_clock::now();
  cosTransformKernel<<<nblks,ntpb>>>(d_a, d_b, size);
  cudaDeviceSynchronize();
  te = steady_clock::now();
  reportTime("Cosine Transform on device", te - ts);
  cudaMemcpy(s, d_b, sizeof(double)*n, cudaMemcpyDeviceToHost);
  ts = steady_clock::now();
  hs = cosine_transform_data ( n, r );
  te = steady_clock::now();
  reportTime("Cosine Transform on host", te - ts);
  cudaFree(d_a);
  cudaFree(d_b);
  delete [] r;
  delete [] s;
  delete [] hs;
}
|}
The graph for the execution time difference between the device and the host looks like:
[[File:kernel1.png]]
Even though the kernel still contains a for-loop, the execution time has decreased drastically. That is because each thread is now responsible for calculating one element of the final cosine-transformed vector.
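Assuming the file is saved as transform.cu (the name is an assumption), this version can be built and run like so:

 nvcc -std=c++11 transform.cu -o transform
 ./transform 100000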
=== Assignment 3 ===
To optimize the code further, we thought of removing the iterative loop from the kernel by using threadIdx.y to control the calculation of each element's cosine term for its position in the notional matrix. The problem was that all the threads of a row then raced to write to the same memory location while summing up the cosine transformations for that row. We solved this with an atomic function, whose prototype is as follows.
double atomicAdd(double* address, double value)
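Note that atomicAdd performs the read-modify-write entirely inside the atomic operation and returns the ''old'' value, so the return value should simply be discarded (the double overload also requires a GPU of compute capability 6.0 or newer):

 atomicAdd(&b[i], value);           // correct: the addition happens atomically in global memory
 // b[i] = atomicAdd(&b[i], value); // wrong: stores the old value of b[i] back, overwriting the sum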
=====Kernel Version 2=====
{| class="wikitable mw-collapsible mw-collapsed"
! Kernel 2
|-
|
# include <cmath>
# include <cstdlib>
# include <iostream>
# include <iomanip>
# include <ctime>
# include <chrono>
# include <cstdlib>
# include <cmath>
#include <limits>
#include <cuda_runtime.h>
#include <cuda.h>
using namespace std;
using namespace std::chrono;
const double pi = 3.141592653589793;
const unsigned ntpb = 32;
void cosine_transform_test01 ( int size );
double *r8vec_uniform_01_new ( int n, int &seed ){
int i;
const int i4_huge = 2147483647;
int k;
double *r;
if ( seed == 0 ){
cerr << "\n";
cerr << "R8VEC_UNIFORM_01_NEW - Fatal error!\n";
cerr << " Input value of SEED = 0.\n";
exit ( 1 );
}
r = new double[n];
for ( i = 0; i < n; i++ ){
k = seed / 127773;
seed = 16807 * ( seed - k * 127773 ) - k * 2836;
if ( seed < 0 ){
seed = seed + i4_huge;
}
r[i] = ( double ) ( seed ) * 4.656612875E-10;
}
return r;
}
double *cosine_transform_data ( int n, double d[] ){
double angle;
double *c;
int i;
int j;
c = new double[n];
for ( i = 0; i < n; i++ ){
c[i] = 0.0;
for ( j = 0; j < n; j++ ){
angle = pi * ( double ) ( i * ( 2 * j + 1 ) ) / ( double ) ( 2 * n );
c[i] = c[i] + cos ( angle ) * d[j];
}
c[i] = c[i] * sqrt ( 2.0 / ( double ) ( n ) );
}
return c;
}
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
__global__ void cosTransformKernel(double *a, double *b, const int n){
double angle;
const double pi = 3.141592653589793;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if(i<n && j<n){
angle = pi * ( double ) ( i * ( 2 * j + 1 ) ) / ( double ) ( 2 * n );
double value = cos ( angle ) * a[j];
atomicAdd(&b[i], value); // accumulate atomically; the returned old value is not needed
}
//scale the accumulated row sum by sqrt(2/n), done by the thread for the last column of row i
if(j==n-1 && i<n){
b[i] *= sqrt ( 2.0 / ( double ) ( n ) );
}
}
int main (int argc, char* argv[] ){
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]);
cosine_transform_test01 (n);
return 0;
}
void cosine_transform_test01 ( int size){
int n = size;
int seed;
double *r;
double *hs; //host side pointer to store the array returned from host side cosine_transform_data, for comparison purposes
double *s = new double[n];
//double *t;
double *d_a;
double *d_b;
//allocate memory on the device for the randomly generated array and for the array in which transform values will be stored
cudaMalloc((void**)&d_a,sizeof(double) * n);
cudaMalloc((void**)&d_b,sizeof(double) * n);
seed = 123456789;
r = r8vec_uniform_01_new ( n, seed );
//zero the output buffer on the host so the device accumulator starts at 0
for(int i=0; i<n; i++)
s[i]=0.0;
//copy the randomly generated values and the zeroed output array from host to device
cudaMemcpy(d_a,r,sizeof(double)*n,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,s,sizeof(double)*n,cudaMemcpyHostToDevice);
int nblks = (n + ntpb - 1) / ntpb;
dim3 grid(nblks,nblks,1);
dim3 block(ntpb,ntpb,1);
steady_clock::time_point ts, te;
ts = steady_clock::now();
cosTransformKernel<<<grid,block>>>(d_a,d_b,size);
cudaDeviceSynchronize();
te = steady_clock::now();
reportTime("Cosine Transform on device",te-ts);
cudaMemcpy(s,d_b,sizeof(double)*n,cudaMemcpyDeviceToHost);
ts = steady_clock::now();
hs = cosine_transform_data ( n, r );
te = steady_clock::now();
reportTime("Cosine Transform on host",te-ts);
cudaFree(d_a);
cudaFree(d_b);
delete [] r;
delete [] s;
delete [] hs;
//delete [] t;
return;
}
|}
Here is a comparison between the naive and the optimized kernel:
[[File:kernel2.jpg]]
Evidently, there is some performance boost for the new version. However, each call to atomicAdd serializes access to its memory location: the old value must be read, added to, and written back before any other thread can update b[i]. This serialization keeps the kernel from being as fast as might be expected.
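One way to avoid the atomics altogether (a sketch of a possible next step, not code from this project, and assuming blockDim.x is a power of two) is to give each output element its own block, let each thread accumulate a strided partial sum, and combine the partials with a shared-memory tree reduction:

 __global__ void cosTransformReduce(const double* a, double* b, int n) {
     extern __shared__ double partial[];   // one slot per thread
     const double pi = 3.141592653589793;
     int i = blockIdx.x;                   // one block per output element
     double sum = 0.0;
     // each thread accumulates a strided slice of row i
     for (int j = threadIdx.x; j < n; j += blockDim.x) {
         double angle = pi * (double)(i * (2 * j + 1)) / (double)(2 * n);
         sum += cos(angle) * a[j];
     }
     partial[threadIdx.x] = sum;
     __syncthreads();
     // tree reduction in shared memory
     for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
         if (threadIdx.x < stride)
             partial[threadIdx.x] += partial[threadIdx.x + stride];
         __syncthreads();
     }
     if (threadIdx.x == 0)
         b[i] = partial[0] * sqrt(2.0 / (double)n);
 }
 // launch: cosTransformReduce<<<n, 256, 256 * sizeof(double)>>>(d_a, d_b, n);

Every addition then happens in fast shared memory, and each row performs only a single write to global memory.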