diff --git a/makefile b/makefile
index 31a29da..4619878 100644
--- a/makefile
+++ b/makefile
@@ -5,6 +5,7 @@ itpp=${pkgs}/itpp/4.3.1/lib/ -litpp
 # INC_DIR=weilei_lib
 # CXX = g++ -Wall -g -m64 -std=c++0x ${opt} ${itpp}
 CXX = g++ -Wall -g -m64 -std=c++11 -pthread ${opt} ${itpp}
+CXX = g++ -Wall -g -m64 -std=c++11 -fopenmp ${opt} ${itpp}
 CMD= -lm -lgmpxx -lgmp
 INC_DIR=~/working/weilei_lib
 
@@ -39,6 +40,8 @@ my_bp3.out:my_bp3.c $(files)
 	$(command)
 my_bp4.out:my_bp4.cpp $(files)
 	$(command)
+my_bp5.out:my_bp5.cpp $(files)
+	$(command)
 #bp_decoding3.out:bp_decoding3.c mm_read.c mm_read.h mmio.c mmio.h mm_write.c mm_write.h lib.cpp lib.h my_lib.h makefile
 bp_decoding3.out:bp_decoding3.c $(files)
 	$(command)
diff --git a/my_bp5.cpp b/my_bp5.cpp
new file mode 100644
index 0000000..e04379c
--- /dev/null
+++ b/my_bp5.cpp
@@ -0,0 +1,303 @@
+//Weilei Apr 6, 2020
+// copied from my_bp1.c
+//#include
+#include <iostream>
+#include <string>
+#include <vector>
+#include <cstdio>
+#include <cmath>
+#include <chrono>
+#include <future>
+#include "my_lib.h"
+#include <itpp/itbase.h>
+#include "bp_decoder.h"
+using namespace std;
+using namespace itpp;
+
+
+// Read the code from files and do BP decoding
+//input: source file for the stabilizer matrix; error probability p;
+
+int decode( BP_Decoder, GF2mat G, GF2mat H, double p, mat * data, int col_index, int row_index, int cycles, int feedback, double time_out, int num_data_points);
+
+int main(int argc, char **argv){
+  Parser parser;
+  parser.init(argc,argv);
+  //p.set_silentmode(true);
+
+  string filename_G, filename_H;
+  //parser.get(filename_G,"filename_G");
+  Real_Timer timer; double remained_time; //remaining time for each size
+  //vector<future<int>> pool;
+  int cores=32; parser.get(cores, "cores");
+  //vector<future<int>>::size_type pool_size = cores+2; //max number of threads, 15 with decreasing size for best performance
+  //std::chrono::milliseconds span (100);
+  //std::chrono::milliseconds final_thread_time (10000);//10 secs before preliminary result print
+  string filename_data;
+  filename_data="gnuplot/result/my-bp5-test.gnudat";
+  parser.get(filename_data,"filename_data");
+
+  int feedback=5; parser.get(feedback,"feedback");
+  // int cycles = 1000000;//70 sec for 2,000,000
+  // double time_out=200;//time out in seconds for each data point (p and size)
+  int num_data_points = 100;//data entries for each data point
+  parser.get(num_data_points, "num_data_points");
+  //change parameter p, code size
+  double p;
+  //parser.get(p,"p");
+  // int sizes[]= {13,11,9,7,5};
+  int sizes[]= {13,9,5};
+  int size_of_sizes = sizeof(sizes)/sizeof(*sizes);
+  string stabilizer_folder="data/toric/stabilizer";
+  double ip_begin=-0.7;
+  double ip_end=-1.8;
+  int data_rows = (ip_begin-ip_end)/0.1;
+  mat data(data_rows, 5 * size_of_sizes ); //return result in a mat, 5 columns for each size; format defined in the header
+  data.zeros();
+
+  int exit_iteration=9; parser.get(exit_iteration,"exit_iteration");
+  int schedule_mode=4; parser.get(schedule_mode, "schedule_mode");
+
+  //split tasks into smaller chunks
+  int chunk_num_data_points=10;//number of data points in each chunk
+  int chunk_size=num_data_points/chunk_num_data_points; //number of chunks for each p and size
+  int chunk_num_for_each_size = chunk_size*((ip_begin-ip_end)/0.1);
+
+  //timeout should be 5 times longer on the HPCC
+  double chunk_time_out = 100.0;//time_out/chunk_size;
+  parser.get(chunk_time_out,"chunk_time_out");
+  int chunk_cycles=chunk_num_data_points*1000;//1000 for prob 1/1000
+
+  mat chunk_data(data_rows*chunk_size,5*5);
+  chunk_data.zeros();
+
+
+  int col_index=-5;
+  for ( int size : sizes){
+    // cout<<"size = "<<size<<endl;
+    for ( double ip=ip_begin; ip > ip_end; ip-=0.1){
+      //cout<<"ip = "<<ip<<endl;
+      while ( pool.size() >= pool_size ){
+	//wait until some of them finish
+	//break;
+	for(vector<future<int> > :: iterator it = pool.begin(); it != pool.end(); ++it){
+	  //printf("%d", *it);
+	  if ( it->wait_for(span) == future_status::ready){
+	    // cout<<"."<<endl;
+      /* wait for the remaining decoding jobs to finish
+      for(vector<future<int> > :: iterator it = pool.begin(); it != pool.end(); ++it){
+	it->get();
+      }*/
+
+      //process chunk_data to data
+      int temp_index;
+      double value;
+      for ( int i =0; i
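
Note on the threading pattern in my_bp5.cpp: most of the pool bookkeeping above is either commented out or lost in this hunk, so the following is only a minimal, self-contained sketch of the bounded std::async/std::future pool it appears to follow (launch one decoding job per chunk, keep at most pool_size jobs in flight, poll with wait_for, then refill). Everything in the sketch is illustrative: fake_decode stands in for the repository's decode(), and the pool size, polling interval, and chunk count are placeholder values, not the ones used in the patch.

    #include <chrono>
    #include <future>
    #include <iostream>
    #include <thread>
    #include <vector>
    using namespace std;

    // Placeholder job: pretend to decode one chunk and return a status code.
    static int fake_decode(int chunk_id){
      this_thread::sleep_for(chrono::milliseconds(50 + chunk_id % 7));
      return chunk_id;
    }

    int main(){
      const vector<future<int>>::size_type pool_size = 4; // e.g. cores
      const chrono::milliseconds span(100);               // polling interval
      vector<future<int>> pool;

      for (int chunk = 0; chunk < 20; ++chunk){
        // Wait here until the pool has a free slot.
        while (pool.size() >= pool_size){
          for (auto it = pool.begin(); it != pool.end(); ++it){
            if (it->wait_for(span) == future_status::ready){
              cout << "chunk " << it->get() << " done" << endl;
              pool.erase(it); // iterator invalidated: stop this scan
              break;
            }
          }
        }
        // Launch the next chunk asynchronously.
        pool.push_back(async(launch::async, fake_decode, chunk));
      }

      // Drain whatever is still running.
      for (auto &f : pool) cout << "chunk " << f.get() << " done" << endl;
      return 0;
    }

Compiling the sketch with g++ -std=c++11 -pthread is enough; it reproduces the wait_for/future_status::ready polling visible in the truncated loop above.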
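
Build/usage note: with the new rule, make my_bp5.out should build the binary; the second CXX assignment in the makefile overrides the -pthread one above it, so the build now picks up thread support via -fopenmp instead. The parameters read through parser.get() (cores, feedback, num_data_points, exit_iteration, schedule_mode, chunk_time_out, filename_data) are optional overrides of the defaults hard-coded in main(); assuming itpp's Parser keeps its usual name=value command-line convention, a run would look like ./my_bp5.out cores=32 num_data_points=100 filename_data=gnuplot/result/my-bp5-test.gnudat.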