{{GPU610/DPS915 Index | 20191}}
= Algo holics =
== Team Members ==
# [mailto:ssdhillon20@myseneca.ca?subject=GPU610 Sukhbeer Dhillon], Simple Backpropagation Neural Network
# [mailto:gsingh520@myseneca.ca?subject=gpu610 Gurpreet Singh], Sudoku Puzzle Solver
# [mailto:egiang1@myseneca.ca?subject=gpu610 Edgar Giang], Merge sort
=====Source Code=====
Here is the [https://cognitivedemons.wordpress.com/2018/06/08/neural-network-in-c-part-2-mnist-handwritten-digits-dataset/ source code] used.
I changed the source code to put the network's training in a separate function called train(). I also used the cblas library to do the matrix-matrix multiplication inside the dot() function, instead of three nested for loops (see the sketch after the collapsed source below).

{| class="wikitable mw-collapsible mw-collapsed"
! nn.cpp
|-
|
<pre>
// To compile: g++ -o nn nn.cpp -std=c++11 -lgslcblas
// To run: ./nn
// Created by Sergei Bugrov on 4/20/18.
// Copyright © 2017 Sergei Bugrov. All rights reserved.
// Download dataset from: https://drive.google.com/file/d/1tVyvg6c1Eo5ojtiz0R17YEzcUe5cN285/view
// Updated By - Sukhbeer Singh Dhillon, March 23rd 2019

#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <vector>
#include <math.h>
#include <fstream>
#include <sstream>
#include <string>
#include <random>
#include <algorithm>
extern "C" {
#include <gsl/gsl_cblas.h>
}

using namespace std;

// Prints the input vector as an n_rows x n_columns matrix and returns,
// for every row, the index of its largest element (the predicted digit).
vector<float> print(const vector<float>& m, int n_rows, int n_columns) {
    vector<float> outputDigits;
    for (int i = 0; i != n_rows; ++i) {
        float digitPredicted = 0.0;
        int index = 0;
        for (int j = 0; j != n_columns; ++j) {
            float currentValue = m[i * n_columns + j];
            cout << currentValue << " ";
            if (currentValue > digitPredicted) {
                digitPredicted = currentValue;
                index = j;
            }
        }
        outputDigits.push_back(index);
        cout << " --> Digit = " << index << "\n";
    }
    cout << endl;
    return outputDigits;
}

int argmax(const vector<float>& m) {
    return distance(m.begin(), max_element(m.begin(), m.end()));
}

// ReLU activation: max(0, z), elementwise
vector<float> relu(const vector<float>& z) {
    int size = z.size();
    vector<float> output;
    for (int i = 0; i < size; ++i) {
        if (z[i] < 0)
            output.push_back(0.0);
        else
            output.push_back(z[i]);
    }
    return output;
}

// Derivative of ReLU: 1 where z > 0, else 0
vector<float> reluPrime(const vector<float>& z) {
    int size = z.size();
    vector<float> output;
    for (int i = 0; i < size; ++i) {
        if (z[i] <= 0)
            output.push_back(0.0);
        else
            output.push_back(1.0);
    }
    return output;
}

// Fills a vector of the given size with uniform random values in [0.0, 0.05)
static vector<float> random_vector(const int size) {
    uniform_real_distribution<> distribution(0.0, 0.05);
    static default_random_engine generator;
    vector<float> data(size);
    generate(data.begin(), data.end(), [&]() { return distribution(generator); });
    return data;
}

// Row-wise softmax over blocks of length dim, with the usual
// max-subtraction trick for numerical stability
vector<float> softmax(const vector<float>& z, const int dim) {
    const int zsize = static_cast<int>(z.size());
    vector<float> out;
    for (int i = 0; i != zsize; i += dim) {
        vector<float> foo;
        for (int j = 0; j != dim; ++j)
            foo.push_back(z[i + j]);
        float max_foo = *max_element(foo.begin(), foo.end());
        for (int j = 0; j != dim; ++j)
            foo[j] = exp(foo[j] - max_foo);
        float sum_of_elems = 0.0;
        for (int j = 0; j != dim; ++j)
            sum_of_elems = sum_of_elems + foo[j];
        for (int j = 0; j != dim; ++j)
            out.push_back(foo[j] / sum_of_elems);
    }
    return out;
}

// Derivative of the sigmoid function, f'(x) = f(x)(1 - f(x)), elementwise
vector<float> sigmoid_d(const vector<float>& m1) {
    const unsigned long VECTOR_SIZE = m1.size();
    vector<float> output(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        output[i] = m1[i] * (1 - m1[i]);
    return output;
}

// Sigmoid function f(x) = 1/(1 + e^-x), elementwise
vector<float> sigmoid(const vector<float>& m1) {
    const unsigned long VECTOR_SIZE = m1.size();
    vector<float> output(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        output[i] = 1 / (1 + exp(-m1[i]));
    return output;
}

// Elementwise sum of two vectors
vector<float> operator+(const vector<float>& m1, const vector<float>& m2) {
    const unsigned long VECTOR_SIZE = m1.size();
    vector<float> sum(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        sum[i] = m1[i] + m2[i];
    return sum;
}

// Elementwise difference of two vectors
vector<float> operator-(const vector<float>& m1, const vector<float>& m2) {
    const unsigned long VECTOR_SIZE = m1.size();
    vector<float> difference(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        difference[i] = m1[i] - m2[i];
    return difference;
}

// Elementwise (Hadamard) product of two vectors
vector<float> operator*(const vector<float>& m1, const vector<float>& m2) {
    const unsigned long VECTOR_SIZE = m1.size();
    vector<float> product(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        product[i] = m1[i] * m2[i];
    return product;
}

// Product of a scalar and a vector
vector<float> operator*(const float m1, const vector<float>& m2) {
    const unsigned long VECTOR_SIZE = m2.size();
    vector<float> product(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        product[i] = m1 * m2[i];
    return product;
}

// Elementwise division of a vector by a scalar
vector<float> operator/(const vector<float>& m2, const float m1) {
    const unsigned long VECTOR_SIZE = m2.size();
    vector<float> quotient(VECTOR_SIZE);
    for (unsigned i = 0; i != VECTOR_SIZE; ++i)
        quotient[i] = m2[i] / m1;
    return quotient;
}

// Returns the transpose mT of the input matrix m
// (C columns, R rows, both referring to the input layout)
vector<float> transpose(float* m, const int C, const int R) {
    vector<float> mT(C * R);
    for (int n = 0; n != C * R; n++) {
        int i = n / C;
        int j = n % C;
        mT[n] = m[R * j + i];
    }
    return mT;
}

// Matrix product m1 x m2, where m1 is m1_rows x m1_columns and
// m2 is m1_columns x m2_columns; the result is m1_rows x m2_columns.
vector<float> dot(const vector<float>& m1, const vector<float>& m2,
                  const int m1_rows, const int m1_columns, const int m2_columns) {
    vector<float> output(m1_rows * m2_columns);
    // Use cblas: output = 1.0 * m1 * m2 + 0.0 * output, all row-major
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                m1_rows, m2_columns, m1_columns,
                1.0, m1.data(), m1_columns,
                m2.data(), m2_columns,
                0.0, output.data(), m2_columns);
    return output;
}

// Splits a string on the given delimiter
vector<string> split(const string& s, char delim) {
    stringstream ss(s);
    string item;
    vector<string> tokens;
    while (getline(ss, item, delim))
        tokens.push_back(item);
    return tokens;
}

// Prints the first ten predictions next to the first ten ground-truth rows
// and counts how many digits match
void displayPrediction(vector<float> y_prediction, vector<float> y_output) {
    cout << "Predictions:" << "\n";
    vector<float> predict = print(y_prediction, 10, 10);
    cout << "Ground truth:" << "\n";
    vector<float> output = print(y_output, 10, 10);
    int accuracy = 0;
    for (size_t i = 0; i < output.size(); i++) {
        if (predict[i] == output[i]) accuracy++;
    }
    cout << " Accuracy = " << accuracy << "/" << output.size() << std::endl;
}

// Train the model: build a random batch, feed forward, back-propagate,
// adjust the weights, and display epoch data every 1000 iterations.
// @params: x_input - training data, y_output - labels,
//          weight1..weight3 - the layers' weights, updated in place
void train(vector<float> x_input, vector<float> y_output,
           vector<float>& weight1, vector<float>& weight2, vector<float>& weight3) {
    int BATCH_SIZE = 256;
    float lr = .01 / BATCH_SIZE;
    for (unsigned i = 0; i < 10000; i++) {
        // Building batches of input variables (X) and labels (y)
        int randindx = rand() % (42000 - BATCH_SIZE);
        vector<float> b_X;
        vector<float> b_y;
        for (int j = randindx * 784; j < (randindx + BATCH_SIZE) * 784; ++j)
            b_X.push_back(x_input[j]);
        for (int k = randindx * 10; k < (randindx + BATCH_SIZE) * 10; ++k)
            b_y.push_back(y_output[k]);

        // Feed forward
        vector<float> a1 = relu(dot(b_X, weight1, BATCH_SIZE, 784, 128));
        vector<float> a2 = relu(dot(a1, weight2, BATCH_SIZE, 128, 64));
        vector<float> yhat = softmax(dot(a2, weight3, BATCH_SIZE, 64, 10), 10);

        // Back propagation
        vector<float> dyhat = (yhat - b_y);
        // dW3 = a2.T * dyhat
        vector<float> dW3 = dot(transpose(&a2[0], BATCH_SIZE, 64), dyhat, 64, BATCH_SIZE, 10);
        // dz2 = dyhat * W3.T * relu'(a2)
        vector<float> dz2 = dot(dyhat, transpose(&weight3[0], 64, 10), BATCH_SIZE, 10, 64) * reluPrime(a2);
        // dW2 = a1.T * dz2
        vector<float> dW2 = dot(transpose(&a1[0], BATCH_SIZE, 128), dz2, 128, BATCH_SIZE, 64);
        // dz1 = dz2 * W2.T * relu'(a1)
        vector<float> dz1 = dot(dz2, transpose(&weight2[0], 128, 64), BATCH_SIZE, 64, 128) * reluPrime(a1);
        // dW1 = X.T * dz1
        vector<float> dW1 = dot(transpose(&b_X[0], BATCH_SIZE, 784), dz1, 784, BATCH_SIZE, 128);

        // Updating the parameters
        weight3 = weight3 - lr * dW3;
        weight2 = weight2 - lr * dW2;
        weight1 = weight1 - lr * dW1;

        if ((i + 1) % 1000 == 0) {
            cout << "-----------------------------------------------Epoch " << i + 1
                 << "--------------------------------------------------" << "\n";
            // Note: this prints the first ten rows of the full label set, not of
            // the current batch (b_y), so the reported accuracy is only indicative.
            displayPrediction(yhat, y_output);
            vector<float> loss_m = yhat - b_y;
            float loss = 0.0;
            for (int k = 0; k < BATCH_SIZE * 10; ++k)
                loss += loss_m[k] * loss_m[k];
            cout << " Loss " << loss / BATCH_SIZE << "\n";
            cout << "--------------------------------------------End of Epoch :(------------------------------------------------" << "\n";
        }
    }
}

int main(int argc, const char* argv[]) {
    string line;
    vector<string> line_v;
    cout << "Loading data ...\n";
    vector<float> X_train;
    vector<float> y_train;
    ifstream myfile("train.txt");
    if (myfile.is_open()) {
        while (getline(myfile, line)) {
            line_v = split(line, '\t');
            // First column is the label, one-hot encoded into y_train
            int digit = strtof((line_v[0]).c_str(), 0);
            for (int i = 0; i < 10; ++i) {
                if (i == digit)
                    y_train.push_back(1.);
                else
                    y_train.push_back(0.);
            }
            // Remaining columns are the 784 pixel values
            int size = static_cast<int>(line_v.size());
            for (int i = 1; i < size; ++i)
                X_train.push_back(strtof((line_v[i]).c_str(), 0));
        }
        X_train = X_train / 255.0;  // scale pixels to [0, 1]
        myfile.close();
    }
    else cout << "Unable to open file" << '\n';

    // Random initialization of the weights
    vector<float> W1 = random_vector(784 * 128);
    vector<float> W2 = random_vector(128 * 64);
    vector<float> W3 = random_vector(64 * 10);

    cout << "Training the model ...\n";
    train(X_train, y_train, W1, W2, W3);
    return 0;
}
</pre>
|}
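As mentioned above, the original ''dot()'' multiplied its matrices with three nested for loops. Below is a minimal sketch of that change; ''dot_naive'' and ''dot_blas'' are illustrative names, not functions in nn.cpp, and the cblas_sgemm arguments mirror the call now used in ''dot()'':

<pre>
// C (m x n) = A (m x k) * B (k x n), all row-major
#include <vector>
extern "C" {
#include <gsl/gsl_cblas.h>
}

// The original approach: three nested loops, O(m*n*k) scalar work
std::vector<float> dot_naive(const std::vector<float>& A, const std::vector<float>& B,
                             int m, int k, int n) {
    std::vector<float> C(m * n, 0.0f);
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            for (int p = 0; p < k; ++p)
                C[i * n + j] += A[i * k + p] * B[p * n + j];
    return C;
}

// The replacement: one BLAS call (alpha = 1, beta = 0 gives C = A * B)
std::vector<float> dot_blas(const std::vector<float>& A, const std::vector<float>& B,
                            int m, int k, int n) {
    std::vector<float> C(m * n);
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                m, n, k, 1.0f, A.data(), k, B.data(), n, 0.0f, C.data(), n);
    return C;
}
</pre>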
The result given below compares the predictions made by the trained network with the actual output after 10000 iterations. The ground truth is the actual value of the labels between 0-9 (true for the corresponding digit in the dataset). As you can see, the accuracy of the network is not that great.

<pre>
-----------------------------------------------Epoch 10000--------------------------------------------------
Predictions:
0.000848207 9.07445e-06 0.000145165 0.797735 4.94866e-06 0.19374 1.55013e-06 0.000244941 0.00657041 0.000700498  --> Digit = 3
1.36476e-05 1.07548e-07 8.3835e-05 0.000744837 0.299883 9.37717e-05 3.53349e-05 0.00822595 0.00210021 0.688819  --> Digit = 9
5.11556e-06 0.000616957 0.000233088 0.87458 2.20579e-05 0.0140489 5.03569e-08 0.000518445 0.0826038 0.0273714  --> Digit = 3
0.0178851 3.64621e-08 0.0174107 0.000322792 0.716312 0.00120967 0.189534 0.00303238 0.00613965 0.0481543  --> Digit = 4
7.40077e-07 0.96872 0.014224 0.00555447 2.56397e-05 0.000115577 0.000157107 0.00366156 0.00669771 0.000842866  --> Digit = 1
7.37584e-05 0.00306397 0.0184482 0.056542 0.000217984 0.0807415 0.000430994 1.09367e-05 0.838792 0.00167921  --> Digit = 8
1.23026e-05 1.10682e-09 6.47478e-07 0.000129503 1.28475e-05 1.20242e-05 1.18166e-09 0.953265 2.63176e-05 0.046541  --> Digit = 7
0.974183 3.50241e-18 1.99895e-07 3.4534e-07 2.3755e-11 0.0257772 1.96811e-09 6.99407e-09 3.92052e-05 2.28711e-08  --> Digit = 0
2.21581e-05 9.26954e-09 0.000182046 0.00336899 3.40876e-05 0.0800376 8.35955e-07 1.2496e-07 0.914781 0.00157335  --> Digit = 8
8.59312e-07 4.1739e-05 0.000106891 0.000122639 0.00018295 4.02451e-05 7.21105e-07 0.898311 0.00405182 0.0971408  --> Digit = 7
Ground truth:
0 1 0 0 0 0 0 0 0 0  --> Digit = 1
1 0 0 0 0 0 0 0 0 0  --> Digit = 0
0 1 0 0 0 0 0 0 0 0  --> Digit = 1
0 0 0 0 1 0 0 0 0 0  --> Digit = 4
1 0 0 0 0 0 0 0 0 0  --> Digit = 0
1 0 0 0 0 0 0 0 0 0  --> Digit = 0
0 0 0 0 0 0 0 1 0 0  --> Digit = 7
0 0 0 1 0 0 0 0 0 0  --> Digit = 3
0 0 0 0 0 1 0 0 0 0  --> Digit = 5
0 0 0 1 0 0 0 0 0 0  --> Digit = 3
 Accuracy = 2/10
 Loss 0.184251
--------------------------------------------End of Epoch :(------------------------------------------------
</pre>
Profiling the program with gprof gave the output below. ''displayPrediction'' dominates the flat profile, and in the call graph virtually all of the remaining time sits in ''train()'' and the helpers it calls (transpose, relu, reluPrime, softmax, the vector operators, split, print, and the std::vector emplace_back internals), each accounting for well under 1% of the run time. Only the top of the flat profile is reproduced here:

{| class="wikitable mw-collapsible mw-collapsed"
! gprof flat profile
|-
|
<pre>
Each sample counts as 0.01 seconds.
granularity: each sample hit covers 2 byte(s) for 0.00% of 1084.68 seconds

  %        self
 time     seconds   name
 99.29    1075.84   displayPrediction(std::vector<float, std::allocator<float> >, std::vector<float, std::allocator<float> >)
</pre>
|}
=====Analysis=====
The total execution time of the program is around 18 minutes (gprof reports 1084.68 seconds). The profiling results spot ''displayPrediction'' as the function with the maximum execution time. However, that is only because it prints each matrix with a naive O(n<sup>2</sup>) for-loop; ''train()'' is the next function with the maximum time, and it is the real hotspot of the program. If ''train()'' is made into the kernel, with the other functions it calls turned into device functions, the program should speed up by a good proportion.
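Below is a minimal sketch of what that offload could look like. It assumes the layer sizes hard-coded in nn.cpp (784-128-64-10); the kernel and helper names are illustrative only, and the matrix products would move from cblas_sgemm to cuBLAS's cublasSgemm:

<pre>
// Illustrative CUDA sketch, not part of the current nn.cpp.
// relu() becomes an elementwise kernel, reluPrime's test becomes a
// __device__ helper, and the SGD update is another elementwise kernel.

__device__ float reluPrimeElem(float x) {      // derivative of ReLU
    return x > 0.0f ? 1.0f : 0.0f;
}

__global__ void reluKernel(const float* z, float* a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        a[i] = z[i] > 0.0f ? z[i] : 0.0f;      // max(0, z)
}

__global__ void sgdUpdate(float* w, const float* dw, float lr, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        w[i] -= lr * dw[i];                    // weight = weight - lr * dW
}

// Host side, once per batch (data already resident on the device):
//   int n = BATCH_SIZE * 128;                 // layer-1 activations
//   reluKernel<<<(n + 255) / 256, 256>>>(d_z1, d_a1, n);
</pre>

Keeping the batch and the weights resident on the GPU between iterations would also avoid copying the matrices back and forth on every epoch.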
----
==== Sorting Algorithms - Merge Sort - Edgar Giang ====
=====How to run the program=====
The following command was tested on matrix:
<pre>
g++ fileName.cpp -pg -o test
</pre>
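After compiling with -pg, running the program writes a gmon.out file to the working directory, which gprof then reads. This is the standard gprof workflow; the file names below are the defaults:

<pre>
./test                               # run the instrumented binary; produces gmon.out
gprof test gmon.out > profile.txt    # flat profile + call graph
</pre>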