diff --git a/cpprevolve/CMakeLists.txt b/cpprevolve/CMakeLists.txt index 4b57545b53..78f48af011 100644 --- a/cpprevolve/CMakeLists.txt +++ b/cpprevolve/CMakeLists.txt @@ -76,6 +76,10 @@ set(LIMBO_DIR ../thirdparty/limbo) set(LIMBO_DEFINES USE_NLOPT) include_directories(${LIMBO_DIR}/src) +# Find galgo library for EA +set(GALGO_DIR ../thirdparty/galgo) +include_directories(${GALGO_DIR}) + # Find GSL - GNU Scientific Library find_package(GSL REQUIRED) include_directories(${GSL_INCLUDE_DIRS}) @@ -87,16 +91,24 @@ include_directories(${YAML_CPP_INCLUDE_DIR}) # Find Gazebo # LOCAL_GAZEBO_DIR can be set to a path with a gazebo-config.cmake if (LOCAL_GAZEBO_DIR) - find_package(gazebo 9 REQUIRED CONFIG + find_package(gazebo 10 REQUIRED CONFIG PATHS "${LOCAL_GAZEBO_DIR}" NO_DEFAULT_PATH) message(WARNING "Using local Gazebo @ ${gazebo_DIR}") else() - find_package(gazebo 9 REQUIRED) + find_package(gazebo 10 REQUIRED) endif() include_directories(${GAZEBO_INCLUDE_DIRS}) link_directories(${GAZEBO_LIBRARY_DIRS}) +pkg_check_modules(libavcodec libavcodec) +if (NOT libavcodec_FOUND) + BUILD_WARNING("libavcodec not found") +else() + include_directories(${libavcodec_INCLUDE_DIRS}) + link_directories(${libavcodec_LIBRARY_DIRS}) +endif () + # Find Protobuf # TODO: This part is currently a mess, and it should be handled better find_package(Protobuf REQUIRED) diff --git a/cpprevolve/revolve/gazebo/brains/DifferentialCPG.cpp b/cpprevolve/revolve/gazebo/brains/DifferentialCPG.cpp index 88d4a69bc1..e110f82f41 100644 --- a/cpprevolve/revolve/gazebo/brains/DifferentialCPG.cpp +++ b/cpprevolve/revolve/gazebo/brains/DifferentialCPG.cpp @@ -42,6 +42,11 @@ #include #include +#include +#include +#include +//#include + // Project headers #include "../motors/Motor.h" @@ -64,6 +69,49 @@ using Init_t = limbo::init::LHS; using Kernel_t = limbo::kernel::MaternFiveHalves; using GP_t = limbo::model::GP; +///**************** galgo configuration for real-valued parameters ***************/// +template +void set_my_config(galgo::ConfigInfo<_TYPE>& config, double mutrate) +{ + // override some defaults + config.mutinfo._sigma = 1.0; + config.mutinfo._sigma_lowest = 0.001; // must not be so small that mutation changes nothing + config.mutinfo._ratio_boundary = 0.10; + //ea: 0.4, 0.6, boea: + config.covrate = 0.4; // 0.0 if no cross-over + config.mutrate = mutrate; // mutation rate; usually 1.0 for real-valued representations + config.recombination_ratio = 0.60; // real-valued crossover ratio; must not be 0.5, which would produce two identical offspring after crossover + + config.elitpop = 1; + config.tntsize = 2; // k-tournament size (k = 2 or 4); a higher value gives higher selection pressure + config.Selection = TNT; + config.CrossOver = RealValuedSimpleArithmeticRecombination; + config.mutinfo._type = galgo::MutationType::MutationGAM_UncorrelatedNStepSizeBoundary; + + config.popsize = 20; + config.nbgen = 125; // number of EA generations, e.g. 131+19, 125+25, 122+28 + config.output = true; +} + +template +class objective_function +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 20; + // auto xx = x; + // // map the interval [0, 1] to [-3, 3], [-2, 2] or [-5, 5] + // for (int i = 0; i < dim_in; i++) + // xx[i] = 4. * x[i] - 2.; + // double f = 10. * dim_in; + // for (size_t i = 0; i < dim_in; ++i) + // f += xx[i] * xx[i] - 10. * std::cos(2 * M_PI * xx[i]); + return {0}; // maximum is 0 at (0, 0, 0, 0) + } +}; + + /** * Constructor for DifferentialCPG class. 
* @@ -121,21 +169,23 @@ DifferentialCPG::DifferentialCPG( this->signal_factor_mid = std::stod(controller->GetAttribute("signal_factor_mid")->GetAsString()); this->signal_factor_left_right = std::stod(controller->GetAttribute("signal_factor_left_right")->GetAsString()); - // Limbo BO Learner parameters - this->kernel_noise_ = std::stod(learner->GetAttribute("kernel_noise")->GetAsString()); - this->kernel_optimize_noise_ = std::stoi(learner->GetAttribute("kernel_optimize_noise")->GetAsString()); - this->kernel_sigma_sq_ = std::stod(learner->GetAttribute("kernel_sigma_sq")->GetAsString()); - this->kernel_l_ = std::stod(learner->GetAttribute("kernel_l")->GetAsString()); - this->kernel_squared_exp_ard_k_ = std::stoi(learner->GetAttribute("kernel_squared_exp_ard_k")->GetAsString()); - this->acqui_gpucb_delta_ = std::stod(learner->GetAttribute("acqui_gpucb_delta")->GetAsString()); - this->acqui_ucb_alpha_ = std::stod(learner->GetAttribute("acqui_ucb_alpha")->GetAsString()); - this->acqui_ei_jitter_ = std::stod(learner->GetAttribute("acqui_ei_jitter")->GetAsString()); - - // Non-limbo BO learner para - this->n_init_samples = std::stoi(learner->GetAttribute("n_init_samples")->GetAsString()); - this->n_learning_iterations = std::stoi(learner->GetAttribute("n_learning_iterations")->GetAsString()); - this->n_cooldown_iterations = std::stoi(learner->GetAttribute("n_cooldown_iterations")->GetAsString()); - this->init_method = learner->GetAttribute("init_method")->GetAsString(); + // Limbo BO Learner parameters + this->kernel_noise_ = std::stod(learner->GetAttribute("kernel_noise")->GetAsString()); + this->kernel_optimize_noise_ = std::stoi(learner->GetAttribute("kernel_optimize_noise")->GetAsString()); + this->kernel_sigma_sq_ = std::stod(learner->GetAttribute("kernel_sigma_sq")->GetAsString()); + this->kernel_l_ = std::stod(learner->GetAttribute("kernel_l")->GetAsString()); + this->kernel_squared_exp_ard_k_ = std::stoi(learner->GetAttribute("kernel_squared_exp_ard_k")->GetAsString()); + this->acqui_gpucb_delta_ = std::stod(learner->GetAttribute("acqui_gpucb_delta")->GetAsString()); + this->acqui_ucb_alpha_ = std::stod(learner->GetAttribute("acqui_ucb_alpha")->GetAsString()); + this->acqui_ei_jitter_ = std::stod(learner->GetAttribute("acqui_ei_jitter")->GetAsString()); + this->gaussian_step_size_ = std::stod(learner->GetAttribute("gaussian_step_size")->GetAsString()); + this->mutrate_ = std::stod(learner->GetAttribute("mutrate")->GetAsString()); + + // Non-limbo BO learner para + this->n_init_samples = std::stoi(learner->GetAttribute("n_init_samples")->GetAsString()); + this->n_learning_iterations = std::stoi(learner->GetAttribute("n_learning_iterations")->GetAsString()); + this->n_cooldown_iterations = std::stoi(learner->GetAttribute("n_cooldown_iterations")->GetAsString()); + this->init_method = learner->GetAttribute("init_method")->GetAsString(); // Meta parameters this->startup_time = std::stoi(controller->GetAttribute("startup_time")->GetAsString()); @@ -347,11 +397,25 @@ DifferentialCPG::DifferentialCPG( { std::cout << "Don't load existing brain" << std::endl; } - - // Initialize BO - this->bo_init_sampling(); + if(learner_algorithm == "BO") + { + // Initialize BO + this->bo_init_sampling(); + } + if(learner_algorithm == "EA") + { + // Initialize EA. 
create initial samples + this->ea_init_sampling(); + } + if(learner_algorithm == "BOEA") + { + this->bo_init_sampling(); + } } + // Save parameters + this->save_parameters(); + // Initiate the cpp Evaluator this->evaluator.reset(new Evaluator(this->evaluation_rate)); this->evaluator->directory_name = this->directory_name; @@ -371,17 +435,120 @@ DifferentialCPG::~DifferentialCPG() * Dummy function for limbo */ struct DifferentialCPG::evaluation_function{ + // TODO: Make this neat. I don't know how though. // Number of input dimension (samples.size()) - BO_PARAM(size_t, dim_in, 18); + //spider9:18,spider13:26,spider17:34,gecko7:13,gecko12:23,gecko17:33,babyA:16,babyB:22,babyC:32,one+:12 + BO_PARAM(size_t, dim_in, 16); // number of dimensions of the fitness BO_PARAM(size_t, dim_out, 1); Eigen::VectorXd operator()(const Eigen::VectorXd &x) const { - return limbo::tools::make_vector(0); + return limbo::tools::make_vector(0); }; }; +/** + * Performs the initial random sampling for EA + */ +void DifferentialCPG::ea_init_sampling(){ + if(this->verbose) + { + // We only want to optimize the weights for now. + std::cout << "Number of weights = connections/2 + n_motors are " + << this->connections.size()/2 + << " + " + << this->n_motors + << std::endl; + } + + // Random sampling + if(this->init_method == "RS") + { + std::cout << "This is RS ...." << std::endl; + for (size_t i = 0; i < this->n_init_samples; i++) + { + // Working variable to hold a random number for each weight to be optimized + Eigen::VectorXd init_sample(this->n_weights); + + // For all weights + for (size_t j = 0; j < this->n_weights; j++) + { + // Generate a random number in [0, 1]. Transform later + double f = ((double) rand() / (RAND_MAX)); + + // Append f to vector + init_sample(j) = f; + } + + // Save vector in samples. + this->samples.push_back(init_sample); + } + } + // Latin Hypercube Sampling + else if(this->init_method == "LHS") + { + std::cout << "This is LHS ...." << std::endl; + // Working variable + double my_range = 1.f/this->n_init_samples; + + // If we have n dimensions, create n such vectors that we will permute + std::vector> all_dimensions; + + // Fill vectors + for (size_t i=0; i < this->n_weights; i++) + { + std::vector one_dimension; + + // Prepare for vector permutation + for (size_t j = 0; j < this->n_init_samples; j++) + { + one_dimension.push_back(j); + } + + // Vector permutation + std::random_shuffle(one_dimension.begin(), one_dimension.end() ); + + // Save permuted vector + all_dimensions.push_back(one_dimension); + } + + // For all samples + for (size_t i = 0; i < this->n_init_samples; i++) + { + // Initialize Eigen::VectorXd here. + Eigen::VectorXd init_sample(this->n_weights); + + // For all dimensions + for (size_t j = 0; j < this->n_weights; j++) + { + // Take a LHS + init_sample(j) = all_dimensions.at(j).at(i)*my_range + ((double) rand() / (RAND_MAX))*my_range; + } + + // Append sample to samples + this->samples.push_back(init_sample); + } + } + else + { + std::cout << "Please provide a choice of init_method in {LHS, RS}" << std::endl; + } + + // Print samples + if(this->verbose) + { + for(auto init_sample :this->samples) + { + for (int h = 0; h < init_sample.size(); h++) + { + std::cout << init_sample(h) << ", "; + } + std::cout << std::endl; + } + } +} + /** * Performs the initial random sampling for BO */ @@ -396,8 +563,7 @@ void DifferentialCPG::bo_init_sampling(){ << std::endl; // Information purposes - std::cout << std::endl << "Sample method: " << this->init_method << ". 
Initial " - "samples are: " << std::endl; + std::cout << std::endl << "Sample method: " << this->init_method << ". Initial samples are: " << std::endl; } // Random sampling @@ -409,6 +575,9 @@ void DifferentialCPG::bo_init_sampling(){ Eigen::VectorXd init_sample(this->n_weights); // For all weights + srand((unsigned)time(NULL)); + // trash first one, because it could be the same and I do not know why + auto trash_first = rand(); for (size_t j = 0; j < this->n_weights; j++) { // Generate a random number in [0, 1]. Transform later @@ -417,7 +586,6 @@ void DifferentialCPG::bo_init_sampling(){ // Append f to vector init_sample(j) = f; } - // Save vector in samples. this->samples.push_back(init_sample); } @@ -489,11 +657,24 @@ void DifferentialCPG::bo_init_sampling(){ * Function that obtains the current fitness by calling the evaluator and stores it */ void DifferentialCPG::save_fitness(){ - // Get fitness +// std::cout << "save_fitness().... "; + // calculate the fitness function double fitness = this->evaluator->Fitness(); - + //filter the noise in evaluation, keep the elitism to next generation + if((learner_algorithm == "EA") or (learner_algorithm == "BOEA" and (this->current_iteration > switch_num))) + { + if((this->current_iteration % pop_size == 1) and (this->current_iteration not_eq 1)) + { + std::cout << "current_iteration: " << this->current_iteration + << " fitness: " << fitness << std::endl; + if(fitness < this->best_fitness) + { + fitness = 0.5 * (fitness + this->best_fitness); + } + } + } // Save sample if it is the best seen so far - if(fitness >this->best_fitness) + if(fitness > this->best_fitness) { this->best_fitness = fitness; this->best_sample = this->samples.back(); @@ -501,8 +682,8 @@ void DifferentialCPG::save_fitness(){ if (this->verbose) { - std::cout << "Iteration number " << this->current_iteration << " has fitness " << - fitness << ". 
Best fitness: " << this->best_fitness << std::endl; + std::cout << "Iteration: " << this->current_iteration << ", fitness " << + fitness << ", Best fitness: " << this->best_fitness << std::endl; } // Limbo requires fitness value to be of type Eigen::VectorXd @@ -515,11 +696,67 @@ void DifferentialCPG::save_fitness(){ // Write fitness to file std::ofstream fitness_file; fitness_file.open(this->directory_name + "fitnesses.txt", std::ios::app); - fitness_file << fitness << std::endl; + fitness_file << std::fixed << fitness << std::endl; + fitness_file.flush(); fitness_file.close(); } - +/** + * Wrapper function that makes calls to limbo to solve the current BO + * iteration and returns the best sample + */ +void DifferentialCPG::ea_step(){ + // CONFIG + using _TYPE = float; //float, double, char, int, long + const int NBIT = 64; // Has to remain between 1 and 64, 32:float + galgo::ConfigInfo<_TYPE> config; // A new instance of config get initial defaults + set_my_config<_TYPE>(config, this->mutrate_); // Override some defaults + + // initializing parameters lower and upper bounds + galgo::Parameter< _TYPE, NBIT > par1({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par2({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par3({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par4({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par5({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par6({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par7({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par8({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par9({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par10({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par11({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par12({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par13({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par14({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par15({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par16({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par17({(_TYPE)0.0, (_TYPE)1.0}); + galgo::Parameter< _TYPE, NBIT > par18({(_TYPE)0.0, (_TYPE)1.0}); + // galgo::Parameter< _TYPE, NBIT > par19({(_TYPE)0.0, (_TYPE)1.0}); + // galgo::Parameter< _TYPE, NBIT > par20({(_TYPE)0.0, (_TYPE)1.0}); + + galgo::GeneticAlgorithm< _TYPE > ga(config, par1, par2, par3, par4, par5, par6, par7, par8, par9, par10, par11, par12, par13, par14, par15, par16, + par17, + par18 + // par19, + // par20 + ); + ga.run(this->learner_algorithm, this->current_iteration, this->gaussian_step_size_, this->switch_num, this->samples, this->observations); + + // clear old samples and observations before pass new individuals to samples + this->samples.clear(); + this->observations.clear(); + + //return the populations to samples one by one + std::cout << "The new samples from EA after ga.run() .... " << std::endl; + Eigen::VectorXd x; + for(int i = 0; i < pop_size; i++) + { + x = ga.get_sample(i); //return the last sample in _samples + // Save this x_hat_star, samples: All samples seen so far + this->samples.push_back(x); +// std::cout << "samples[" << i << "]: " << samples[i] << std::endl; + } +} /** * Struct that holds the parameters on which BO is called. 
This is required @@ -641,15 +878,13 @@ void DifferentialCPG::bo_step(){ this->observations); x = boptimizer.last_sample(); - // Write parametesr to verify thread-stability after the run - std::ofstream dyn_parameters_file; - dyn_parameters_file.open(this->directory_name + "dynamic_parameters.txt", std::ios::app); - dyn_parameters_file << Params::acqui_ucb::alpha() << ","; - dyn_parameters_file << Params::kernel_maternfivehalves::sigma_sq() << ","; - dyn_parameters_file << Params::kernel_maternfivehalves::l() << std::endl; - dyn_parameters_file.close(); - - + // Write parameters to verify thread-stability after the run + std::ofstream dyn_parameters_file; + dyn_parameters_file.open(this->directory_name + "dynamic_parameters.txt", std::ios::app); + dyn_parameters_file << Params::acqui_ucb::alpha() << ","; + dyn_parameters_file << Params::kernel_maternfivehalves::sigma_sq() << ","; + dyn_parameters_file << Params::kernel_maternfivehalves::l() << std::endl; + dyn_parameters_file.close(); } // else if(this->acquisition_function == "GP_UCB") // { @@ -724,16 +959,17 @@ void DifferentialCPG::Update( // this->evaluator->Update(this->robot->WorldPose(), _time, _step); this->start_fitness_recording = false; } - // Evaluate policy on certain time limit, or if we just started + // Evaluate policy after the evaluation time limit, or if we just started (runs at the end of each 60 s window) if ((elapsed_evaluation_time > this->evaluation_rate) or ((_time - _step) < 0.001)) { // Update position // this->evaluator->Update(this->robot->WorldPose(), _time, _step); this->start_fitness_recording = true; - // Get and save fitness (but not at start) + // Get and save fitness (but not at start, i.e. only when _time > _step = 0.125) if(not (_time - _step < 0.001 )) { + // obtain the current fitness from the evaluator and store it this->save_fitness(); } @@ -753,7 +989,6 @@ void DifferentialCPG::Update( { this->reset_neuron_state(); } - // If we are still learning if(this->current_iteration < this->n_init_samples + this->n_learning_iterations) { @@ -761,23 +996,63 @@ { if (this->current_iteration < this->n_init_samples) { - std::cout << std::endl << "Evaluating initial random sample" << std::endl; + std::cout << "Evaluating initial random sample" << std::endl; } else { - std::cout << std::endl << "I am learning " << std::endl; + std::cout << "I am learning " << std::endl; } } - // Get new sample (weights) and add sample - this->bo_step(); - - // Set new weights - this->set_ode_matrix(); + if(learner_algorithm == "BO") + { + // Get new sample (weights) and add sample +// if (this->current_iteration >= this->n_init_samples) +// { + this->bo_step(); // appends a new sample to samples +// } + // Set new weights + this->set_ode_matrix(); + } + if(learner_algorithm == "EA") + { + // run the EA to generate a new population only once every pop_size iterations + if((this->current_iteration % pop_size == 0) and (this->current_iteration not_eq 0)) + { + // Get new samples (weights): all individuals are written back into samples + this->ea_step(); + } + // Set new weights; a sample specifies the weight from neuron i to neuron j + this->set_ode_matrix(); + } + if(learner_algorithm == "BOEA") + { + // run BO +// if((this->current_iteration >= this->n_init_samples) and (this->current_iteration < switch_num)) + if(this->current_iteration < switch_num) + { + this->bo_step(); + } + // run EA + if(this->current_iteration >= switch_num) + { + if(this->current_iteration % pop_size == 0) + { + std::cout << "current_iteration: " << this->current_iteration << std::endl; + 
this->ea_step(); + } + } + // update the controller + this->set_ode_matrix(); + } + if(learner_algorithm == "HyperNEAT") + { + ////TODO + } // Update position // this->evaluator->Update(this->robot->WorldPose(), _time, _step); } - // If we are finished learning but are cooling down - reset once + // If we are finished learning but are cooling down - reset once else if((this->current_iteration >= (this->n_init_samples + this->n_learning_iterations)) and (this->current_iteration < (this->n_init_samples + @@ -820,6 +1095,20 @@ void DifferentialCPG::Update( this->start_time = _time; this->evaluator->Reset(); this->current_iteration += 1; + + // get the end time of an evaluation and learning process + if(not (_time - _step < 0.001 )) + { + gettimeofday(&timeEnd,NULL); + timeDiff = timeEnd.tv_sec - timeStart.tv_sec + 0.000001 * (timeEnd.tv_usec - timeStart.tv_usec); + // Write overhead time to file + std::ofstream ctime_file; + ctime_file.open(this->directory_name + "ctime.txt", std::ios::app); + ctime_file << std::fixed << timeDiff << std::endl; + ctime_file.close(); + } + // get the starting time of an evaluation + gettimeofday(&timeStart,NULL); } // Send new signals to the motors @@ -827,9 +1116,12 @@ void DifferentialCPG::Update( p = 0; for (const auto &motor: _motors) { +// this->output[p] = -1; motor->Update(this->output + p, _step); +// std::cout << '\t' << *(this->output+p); p += motor->Outputs(); } +// std::cout << std::endl; } /** @@ -851,9 +1143,33 @@ void DifferentialCPG::set_ode_matrix(){ } matrix.push_back(row); } - // Process A<->B connections - int index = 0; + auto index = 0; + + auto num_sample = 0; + if(this->learner_algorithm == "BO") + { + num_sample = this->current_iteration; + } + if(this->learner_algorithm == "EA") + { + num_sample = this->current_iteration % this->pop_size; + } + if(this->learner_algorithm == "BOEA") + { + if(this->current_iteration < switch_num) + { + num_sample = this->current_iteration; + } + if(this->current_iteration >= switch_num) + { + num_sample = this->current_iteration % this->pop_size; + } + } +// std::cout << "current_iteration: " << this->current_iteration +// << " num_sample: " << num_sample << std::endl; +// std::cout << "samples[" << num_sample << "]: " +// << this->samples[num_sample] << std::endl; for(size_t i =0; i neurons.size(); i++) { // Get correct index @@ -864,15 +1180,15 @@ void DifferentialCPG::set_ode_matrix(){ else{ c = i - 1; } - // Add a/b connection weight index = (int)(i/2); - auto w = this->samples.at(this->current_iteration)(index) * - (this->range_ub - this->range_lb) + this->range_lb; + +// std::cout << "i: " << i << " index: " << index << std::endl; +// std::cout << this->samples.at(num_sample)(index) << " "; + auto w = this->samples.at(num_sample)(index) * (this->range_ub - this->range_lb) + this->range_lb; matrix[i][c] = w; matrix[c][i] = -w; } - // A<->A connections index++; int k = 0; @@ -924,7 +1240,7 @@ void DifferentialCPG::set_ode_matrix(){ } // Get weight - auto w = this->samples.at(this->current_iteration)(index + k) * + auto w = this->samples.at(num_sample)(index + k) * (this->range_ub - this->range_lb) + this->range_lb; // Set connection in weight matrix @@ -942,7 +1258,7 @@ void DifferentialCPG::set_ode_matrix(){ // Save this sample to file std::ofstream samples_file; samples_file.open(this->directory_name + "samples.txt", std::ios::app); - auto sample = this->samples.at(this->current_iteration); + auto sample = this->samples.at(num_sample); for(size_t j = 0; j < this->n_weights; j++) { samples_file << 
sample(j) << ", "; @@ -951,7 +1267,6 @@ void DifferentialCPG::set_ode_matrix(){ samples_file.close(); } - /** * Set states back to original value (that is on the unit circle) */ @@ -1056,7 +1371,7 @@ void DifferentialCPG::step( this->next_state[i] = x[i]; } - // Loop over all neurons to actually update their states. Note that this is a new outer for loop + // Loop over all neurons to actually update their states. Note that this is a new outer for loop auto i = 0; auto j = 0; for (auto &neuron : this->neurons) { @@ -1081,7 +1396,6 @@ void DifferentialCPG::step( // Use frame of reference if(use_frame_of_reference) { - if (std::abs(frame_of_reference) == 1) { this->output[j] = this->signal_factor_left_right*this->abs_output_bound*((2.0)/(1.0 + std::pow(2.718, -2.0*x/this->abs_output_bound)) -1); @@ -1165,7 +1479,10 @@ void DifferentialCPG::save_parameters(){ parameters_file << "EXP-ARD Kernel k: "<< Params::kernel_squared_exp_ard::k() << std::endl; parameters_file << "EXP-ARD Kernel sigma_sq: "<< Params::kernel_squared_exp_ard::sigma_sq() << std::endl; parameters_file << "MFH Kernel sigma_sq: "<< Params::kernel_maternfivehalves::sigma_sq() << std::endl; - parameters_file << "MFH Kernel l: "<< Params::kernel_maternfivehalves::l() << std::endl << std::endl; + parameters_file << "MFH Kernel l: "<< Params::kernel_maternfivehalves::l() << std::endl; + parameters_file << "gaussian_step_size: "<< this->gaussian_step_size_ << std::endl; + parameters_file << "mutrate: "<< this->mutrate_ << std::endl << std::endl; + parameters_file.close(); } diff --git a/cpprevolve/revolve/gazebo/brains/DifferentialCPG.h b/cpprevolve/revolve/gazebo/brains/DifferentialCPG.h index 2595f83621..95552a06c3 100644 --- a/cpprevolve/revolve/gazebo/brains/DifferentialCPG.h +++ b/cpprevolve/revolve/gazebo/brains/DifferentialCPG.h @@ -142,9 +142,15 @@ namespace revolve /// \brief Init BO loop public: void bo_init_sampling(); + /// \brief Init EA loop + public: void ea_init_sampling(); + /// \brief Main BO loop public: void bo_step(); + /// \brief Main EA loop + public: void ea_step(); + /// \brief evaluation rate private: double evaluation_rate; @@ -169,6 +175,12 @@ namespace revolve /// \brief Starting time private: double start_time; + /// \brief time interval evaluating and learning for each iteration + private: double timeDiff; + + /// \brief the time stamp of starting and end. 
+ private: struct timeval timeStart, timeEnd; + /// \brief BO attributes private: size_t current_iteration = 0; @@ -178,6 +190,15 @@ namespace revolve /// \brief Number of initial samples private: size_t n_init_samples; + /// \brief Population size of Evolutionary Algorithm + private: size_t pop_size = 20; + + /// \brief Iteration at which BOEA switches from BO to EA + private: size_t switch_num = 300; + + /// \brief The type of learning algorithm + private: std::string learner_algorithm = "EA"; + /// \brief Cool down period private: size_t n_cooldown_iterations; @@ -259,15 +280,20 @@ namespace revolve /// \brief Use frame of reference {-1,0,1} version or not private: bool use_frame_of_reference; - // BO Learner parameters - private: double kernel_noise_; - private: bool kernel_optimize_noise_; - public: double kernel_sigma_sq_; - public: double kernel_l_; - private: int kernel_squared_exp_ard_k_; - private: double acqui_gpucb_delta_ ; - public: double acqui_ucb_alpha_; - private: double acqui_ei_jitter_; + // BO Learner parameters + private: double kernel_noise_; + private: bool kernel_optimize_noise_; + public: double kernel_sigma_sq_; + public: double kernel_l_; + private: int kernel_squared_exp_ard_k_; + private: double acqui_gpucb_delta_ ; + public: double acqui_ucb_alpha_; + private: double acqui_ei_jitter_; + + // EA Learner parameters + public: double gaussian_step_size_; + public: double mutrate_; + }; } } diff --git a/cpprevolve/revolve/gazebo/brains/DifferentialCPG_BO.h b/cpprevolve/revolve/gazebo/brains/DifferentialCPG_BO.h index c859415630..3e19160d28 100644 --- a/cpprevolve/revolve/gazebo/brains/DifferentialCPG_BO.h +++ b/cpprevolve/revolve/gazebo/brains/DifferentialCPG_BO.h @@ -19,6 +19,8 @@ #include #include +#include + namespace limbo { namespace defaults { struct bayes_opt_boptimizer { @@ -86,61 +88,59 @@ namespace limbo { template void optimize(const StateFunction& sfun, std::vector all_samples, std::vector all_observations, const AggregatorFunction& afun = AggregatorFunction(), bool reset = true) { - this->_init(sfun, afun, reset); //reset - - // Maarten: set observations and samples - this->_samples = all_samples; - this->_observations = all_observations; - - if (!this->_observations.empty()) { - _model.compute(this->_samples, this->_observations); - } - else { - std::cout << "OBSERVATION SET IS EMPTY \n"; - _model = model_t(StateFunction::dim_in(), StateFunction::dim_out()); - } - acqui_optimizer_t acqui_optimizer; + this->_init(sfun, afun, reset); //reset - struct timeval timeStart, timeEnd; - double timeDiff; + // Maarten: set observations and samples + this->_samples = all_samples; + this->_observations = all_observations; - while (!this->_stop(*this, afun)) { + if (!this->_observations.empty()) { + _model.compute(this->_samples, this->_observations); + } + else { + std::cout << "OBSERVATION SET IS EMPTY \n"; + _model = model_t(StateFunction::dim_in(), StateFunction::dim_out()); + } + acqui_optimizer_t acqui_optimizer; - gettimeofday(&timeStart,NULL); + struct timeval timeStart, timeEnd; + double timeDiff; - acquisition_function_t acqui(_model, this->_current_iteration); + while (!this->_stop(*this, afun)) { + gettimeofday(&timeStart,NULL); + acquisition_function_t acqui(_model, this->_current_iteration); - auto acqui_optimization = - [&](const Eigen::VectorXd& x, bool g) { return acqui(x, afun, g); }; - Eigen::VectorXd starting_point = tools::random_vector(StateFunction::dim_in(), Params::bayes_opt_bobase::bounded()); + auto acqui_optimization = + [&](const 
Eigen::VectorXd& x, bool g) { return acqui(x, afun, g); }; + Eigen::VectorXd starting_point = tools::random_vector(StateFunction::dim_in(), Params::bayes_opt_bobase::bounded()); - // new samples are from the acquisition optimizer - Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, starting_point, Params::bayes_opt_bobase::bounded()); + // new samples are from the acquisition optimizer + Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, starting_point, Params::bayes_opt_bobase::bounded()); - ///Evaluate a sample and add the result to the 'database'(sample/observations vectors)--it does not update the model - this->eval_and_add(sfun, new_sample); + ///Evaluate a sample and add the result to the 'database'(sample/observations vectors)--it does not update the model + this->eval_and_add(sfun, new_sample); //add new_sample to _sample and sfun to _observations in bo_base.hpp - this->_update_stats(*this, afun); + this->_update_stats(*this, afun); - _model.add_sample(this->_samples.back(), this->_observations.back()); + _model.add_sample(this->_samples.back(), this->_observations.back()); - if (Params::bayes_opt_boptimizer::hp_period() > 0 - && (this->_current_iteration + 1) % Params::bayes_opt_boptimizer::hp_period() == 0) - _model.optimize_hyperparams(); + if (Params::bayes_opt_boptimizer::hp_period() > 0 + && (this->_current_iteration + 1) % Params::bayes_opt_boptimizer::hp_period() == 0) + _model.optimize_hyperparams(); - this->_current_iteration++; - this->_total_iterations++; + this->_current_iteration++; + this->_total_iterations++; - gettimeofday(&timeEnd,NULL); + gettimeofday(&timeEnd,NULL); - timeDiff = 1000000 * (timeEnd.tv_sec - timeStart.tv_sec) - + timeEnd.tv_usec - timeStart.tv_usec; //tv_sec: value of second, tv_usec: value of microsecond - timeDiff/=1000; + timeDiff = 1000000 * (timeEnd.tv_sec - timeStart.tv_sec) + + timeEnd.tv_usec - timeStart.tv_usec; //tv_sec: value of second, tv_usec: value of microsecond + timeDiff/=1000; - std::ofstream ctime; - ctime.open("../ctime.txt", std::ios::app); - ctime << std::fixed << timeDiff << std::endl; - } +// std::ofstream ctime; +// ctime.open("../ctime.txt", std::ios::app); +// ctime << std::fixed << timeDiff << std::endl; + } } /// return the best observation so far (i.e. max(f(x))) @@ -165,7 +165,7 @@ namespace limbo { /// Return a reference to the last sample. 
Used for implementation with revolve const Eigen::VectorXd& last_sample(){ - return this->_samples.back(); + return this->_samples.back(); //back(): return the last element of _samples } const model_t& model() const { return _model; } diff --git a/cpprevolve/revolve/gazebo/brains/Evaluator.cpp b/cpprevolve/revolve/gazebo/brains/Evaluator.cpp index e082cd1596..a1369e0662 100644 --- a/cpprevolve/revolve/gazebo/brains/Evaluator.cpp +++ b/cpprevolve/revolve/gazebo/brains/Evaluator.cpp @@ -20,7 +20,7 @@ */ #include - +#include_next #include "Evaluator.h" using namespace revolve::gazebo; @@ -47,7 +47,7 @@ Evaluator::Evaluator(const double _evaluationRate, this->current_position_.Reset(); this->previous_position_.Reset(); this->start_position_.Reset(); - this->locomotion_type = "directed"; // {directed, gait} + this->locomotion_type = "gait"; // {directed, gait} this->path_length = 0.0; } @@ -69,18 +69,34 @@ double Evaluator::Fitness() double fitness_value = 0.0; if(this->locomotion_type == "gait") { - double dS; - dS = std::sqrt(std::pow(this->previous_position_.Pos().X() - - this->current_position_.Pos().X(), 2) + - std::pow(this->previous_position_.Pos().Y() - - this->current_position_.Pos().Y(), 2)); + double dS = 0.0; +// dS = std::sqrt(std::pow(this->previous_position_.Pos().X() - +// this->current_position_.Pos().X(), 2) + +// std::pow(this->previous_position_.Pos().Y() - +// this->current_position_.Pos().Y(), 2)); + for(int i = 1; i < this->step_poses.size(); i++) + { + const auto &pose_i_1 = this->step_poses[i-1]; + const auto &pose_i = this->step_poses[i]; + dS += Evaluator::measure_distance(pose_i_1, pose_i); + // save coordinates to coordinates.txt + std::ofstream coordinates; + coordinates.open(this->directory_name + "/coordinates.txt",std::ios::app); + if(i == 1) + { + start_position_ = pose_i_1; + coordinates << std::fixed << start_position_.Pos().X() << " " << start_position_.Pos().Y() << std::endl; + } + coordinates << std::fixed << pose_i.Pos().X() << " " << pose_i.Pos().Y() << std::endl; + } + fitness_value = dS / this->evaluation_rate_; } else if (this->locomotion_type == "directed") { - this->step_poses.push_back(this->current_position_); //step_poses: x y z roll pitch yaw +// std::cout << "step_poses.size(): " << step_poses.size() << " "; for (int i=1; i < this->step_poses.size(); i++) { const auto &pose_i_1 = this->step_poses[i-1]; @@ -92,20 +108,22 @@ double Evaluator::Fitness() if(i == 1) { - coordinates << std::fixed << start_position_.Pos().X() << " " << start_position_.Pos().Y() << std::endl; + start_position_ = pose_i_1; + coordinates << std::fixed << start_position_.Pos().X() << " " << start_position_.Pos().Y() << std::endl; } coordinates << std::fixed << pose_i.Pos().X() << " " << pose_i.Pos().Y() << std::endl; } - +// std::cout << "path_length " << path_length << " "; ////********** directed locomotion fitness function **********//// //directions(forward) of heads are the orientation(+x axis) - 1.570796 double beta0 = this->start_position_.Rot().Yaw()- M_PI/2.0; + if (beta0 < - M_PI) //always less than pi (beta0 + max(40degree) < pi) { beta0 = 2 * M_PI - std::abs(beta0); } - //save direction to coordinates.txt: This is used to make Figure 8 + //save direction to coordinates.txt std::ofstream coordinates; coordinates.open(this->directory_name + "/coordinates.txt",std::ios::app); coordinates << std::fixed << beta0 << std::endl; diff --git a/experiments/bo_learner/BOTuningAnalysis.py b/experiments/bo_learner/BOTuningAnalysis.py index 0136decf65..802dc50a5d 100644 
--- a/experiments/bo_learner/BOTuningAnalysis.py +++ b/experiments/bo_learner/BOTuningAnalysis.py @@ -5,7 +5,7 @@ import matplotlib as mpl -path = "/home/gongjinlan/BO_tuning_results/main_1560002003-BO-spider9-finished/" +path = "/Users/lan/projects/revolve/BO_tuning_results/main_1560002003-BO-spider9-finished/" dirs = glob(path + "*/") # Base settings for alph, sigma_sq, l diff --git a/experiments/bo_learner/GridSearch.py b/experiments/bo_learner/GridSearch.py index 71f8c0f2c9..e1d4892833 100644 --- a/experiments/bo_learner/GridSearch.py +++ b/experiments/bo_learner/GridSearch.py @@ -19,24 +19,27 @@ # Parameters -min_lines = 1480 +min_lines = 1490 run_gazebo = False -n_runs = 15 # Naar 20 -n_jobs = 30 +n_runs = 1 # Naar 20 +n_jobs = 1 my_yaml_path = "experiments/bo_learner/yaml/" -yaml_model = "babyC.yaml" +yaml_model = "babyA.yaml" manager = "experiments/bo_learner/manager.py" python_interpreter = ".venv/bin/python3" search_space = { - 'n_learning_iterations': [1500], - 'n_init_samples': [20], + # 'load_brain': ["/Users/lan/projects/revolve/output/cpg_bo/one/main_1560413639/0/0/best_brain.txt"], 'evaluation_rate': [60], - 'verbose': [0], - 'kernel_sigma_sq': [1], - 'kernel_l': [0.02, 0.05, 0.1, 0.2], - 'acqui_ucb_alpha': [0.1, 0.3, 0.5, 1.0], - 'range_ub': [1.5], - 'signal_factor_all': [4.0], + 'init_method': ["LHS"], + 'verbose': [1], + # NOTE: each key may appear only once; in a Python dict literal a repeated + # key silently overrides the earlier value, so the superseded settings are + # kept here as comments instead of duplicate entries + # 'kernel_l': [0.1], + # 'acqui_ucb_alpha': [1.0], + # 'n_learning_iterations': [1450], + # 'n_init_samples': [50], + 'kernel_l': [0.2], + 'acqui_ucb_alpha': [3.0], + 'n_learning_iterations': [950], + 'n_init_samples': [50], } print(search_space) @@ -47,8 +50,8 @@ finished = False # Make in revolve/build to allow runs from terminal -os.system('cmake /home/gongjinlan/projects/revolve/ -DCMAKE_BUILD_TYPE="Release"') -os.system("make -j60") +os.system('cmake ~/projects/revolve/ -DCMAKE_BUILD_TYPE="Release"') +os.system("make -j4") def change_parameters(original_file, parameters): # Iterate over dictionary elements @@ -152,29 +155,32 @@ def run(i, sub_directory, model, params): # PASTE THE EXPERIMENTS HERE, IN THE FORMAT SHOWN BELOW experiments = [ - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, # BASE RUN - - # BASE RUN - {'init_method': "LHS", 'kernel_l': 0.2, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, - {'init_method': "LHS", 'kernel_l': 0.5, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, - {'init_method': "LHS", 'kernel_l': 1.0, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, - {'init_method': "LHS", 'kernel_l': 1.5, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, - - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.1, 'acqui_ucb_alpha': 0.5}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.2, 'acqui_ucb_alpha': 0.5}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.5, 'acqui_ucb_alpha': 0.5}, - # BASE RUN - - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.1}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.2}, - # BASE RUN - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 1.0}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 1.5}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 2.0}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 3.0}, - {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 4.0}, - - {'init_method': 
"RS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, + {'init_method': "LHS", 'gaussian_step_size': 0.2999, 'mutrate': 0.6}, + {'init_method': "LHS", 'gaussian_step_size': 0.2999, 'mutrate': 0.7}, + {'init_method': "LHS", 'gaussian_step_size': 0.3111, 'mutrate': 0.6}, + {'init_method': "LHS", 'gaussian_step_size': 0.3111, 'mutrate': 0.7}, + + # # BASE RUN + # {'init_method': "LHS", 'kernel_l': 0.2, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, + # {'init_method': "LHS", 'kernel_l': 0.5, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, + # {'init_method': "LHS", 'kernel_l': 1.0, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, + # {'init_method': "LHS", 'kernel_l': 1.5, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, + # + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.1, 'acqui_ucb_alpha': 0.5}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.2, 'acqui_ucb_alpha': 0.5}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 0.5, 'acqui_ucb_alpha': 0.5}, + # # BASE RUN + # + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.1}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.2}, + # # BASE RUN + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 1.0}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 1.5}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 2.0}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 3.0}, + # {'init_method': "LHS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 4.0}, + # + # {'init_method': "RS", 'kernel_l': 0.1, 'kernel_sigma_sq': 1.0, 'acqui_ucb_alpha': 0.5}, # BASE RUN ] # 'kernel_l': [0.02, 0.05, 0.1, 0.2], @@ -185,6 +191,7 @@ def run(i, sub_directory, model, params): # Get id's on the permutations for ix, my_dict in enumerate(experiments): my_dict["id"] = ix + experiments *=n_runs # Save to yaml files create_yamls(yaml_path=my_yaml_path, diff --git a/experiments/bo_learner/run_get_analytics.py b/experiments/bo_learner/run_get_analytics.py index c48c90fa6e..f411c272cb 100644 --- a/experiments/bo_learner/run_get_analytics.py +++ b/experiments/bo_learner/run_get_analytics.py @@ -1,8 +1,8 @@ from glob import glob import os -python_interpreter = "/home/maarten/CLionProjects/revolve/venv/bin/python" -path = "/home/maarten/CLionProjects/revolve/output/cpg_bo/main_1557477606/47/" +python_interpreter = "/Users/lan/projects/revolve/venv/bin/python" +path = "/Users/lan/projects/revolve/output/cpg_bo/main_1557477606/47/" paths = glob(path + "*/") diff --git a/experiments/bo_learner/yaml/babyA.yaml b/experiments/bo_learner/yaml/babyA.yaml index 6ee67bdc64..a714591dd9 100644 --- a/experiments/bo_learner/yaml/babyA.yaml +++ b/experiments/bo_learner/yaml/babyA.yaml @@ -4,9 +4,9 @@ body: id : Core type : Core params: - red: 0.04 - green: 0.26 - blue: 0.72 + red: 0.98 + green: 0.98 + blue: 0.98 children : 1: slot : 0 @@ -24,8 +24,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : -90 children : 1: @@ -44,8 +44,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : -90 children: 2: @@ -64,8 +64,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 3: slot : 0 @@ -83,8 +83,8 @@ body: type : 
FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 2: slot : 0 @@ -102,8 +102,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 3: slot : 0 @@ -163,28 +163,31 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: - n_init_samples: 50 + n_init_samples: 10 init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" + gaussian_step_size: 0.3 + covrate: 0.4 + mutrate: 0.6 meta: - robot_size: 26 - run_analytics: "true" - n_learning_iterations: 1450 - n_cooldown_iterations: 1 + robot_size: 16 + run_analytics: "false" + n_learning_iterations: 30 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/babyB.yaml b/experiments/bo_learner/yaml/babyB.yaml index 1144e9f64f..e4e7104f4d 100644 --- a/experiments/bo_learner/yaml/babyB.yaml +++ b/experiments/bo_learner/yaml/babyB.yaml @@ -203,20 +203,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: - robot_size: 26 - run_analytics: "true" + robot_size: 22 + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/babyC.yaml b/experiments/bo_learner/yaml/babyC.yaml index 07b15884af..f98c7112f0 100644 --- a/experiments/bo_learner/yaml/babyC.yaml +++ b/experiments/bo_learner/yaml/babyC.yaml @@ -309,7 +309,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -317,20 +317,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: - robot_size: 26 - run_analytics: "true" + robot_size: 32 + run_analytics: "false" n_learning_iterations: 1450 n_cooldown_iterations: 1 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/gecko12.yaml b/experiments/bo_learner/yaml/gecko12.yaml index bb6dfaaeab..f6209fd37a 100644 --- a/experiments/bo_learner/yaml/gecko12.yaml +++ b/experiments/bo_learner/yaml/gecko12.yaml @@ -226,7 +226,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -234,20 +234,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 
kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 2.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: - robot_size: 26 - run_analytics: "true" + robot_size: 23 + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/gecko17.yaml b/experiments/bo_learner/yaml/gecko17.yaml index 1c32cf0ede..092a7b77d5 100644 --- a/experiments/bo_learner/yaml/gecko17.yaml +++ b/experiments/bo_learner/yaml/gecko17.yaml @@ -318,7 +318,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -334,12 +334,12 @@ brain: acqui_ei_jitter: 0 acquisition_function: "UCB" meta: - robot_size: 26 - run_analytics: "true" + robot_size: 33 + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/gecko7.yaml b/experiments/bo_learner/yaml/gecko7.yaml index 00b3eee25e..969f40d013 100644 --- a/experiments/bo_learner/yaml/gecko7.yaml +++ b/experiments/bo_learner/yaml/gecko7.yaml @@ -5,8 +5,8 @@ body: type : Core params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 children : 2: slot : 0 @@ -24,8 +24,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 3: slot : 0 @@ -43,8 +43,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 1: slot : 0 @@ -62,8 +62,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : -90 children : 1: @@ -82,8 +82,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : -90 children: 2: @@ -102,8 +102,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 3: slot : 0 @@ -121,8 +121,8 @@ body: type : FixedBrick params: red: 0.04 - green: 0.26 - blue: 0.72 + green: 0.98 + blue: 0.04 orientation : 0 brain: type: bo-cpg @@ -135,7 +135,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -143,20 +143,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: - robot_size: 26 - run_analytics: "true" + robot_size: 13 + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/one.yaml b/experiments/bo_learner/yaml/one.yaml new file mode 100644 index 0000000000..7c786f658e --- 
/dev/null +++ b/experiments/bo_learner/yaml/one.yaml @@ -0,0 +1,158 @@ +--- +id: example_one+ +body: + id : Core + type : Core + params: + red: 0.98 + green: 0.98 + blue: 0.98 + children : + 2: + slot : 0 + id : BodyJoint0 + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + orientation : 90 + children : + 1: + slot : 0 + id : Body0 + type : FixedBrick + params: + red: 0.04 + green: 0.98 + blue: 0.04 + orientation : -90 + children : + 1: + slot : 0 + id : BodyJoint1 + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + orientation : 90 + children : + 1: + slot : 0 + id : Body1 + type : FixedBrick + params: + red: 0.04 + green: 0.98 + blue: 0.04 + orientation : -90 + children: + 2: + slot : 0 + id : Leg10Joint + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + orientation : 0 + children : + 1: + slot : 0 + id : Leg10 + type : FixedBrick + params: + red: 0.04 + green: 0.98 + blue: 0.04 + orientation : 0 + 3: + slot : 0 + id : Leg11Joint + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + orientation : 0 + children : + 1: + slot : 0 + id : Leg11 + type : FixedBrick + params: + red: 0.04 + green: 0.98 + blue: 0.04 + orientation : 0 + 3: + slot : 0 + id : Leg30Joint + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + orientation : 90 + children : + 1: + id : Leg30 + type : FixedBrick + params: + red: 0.04 + green: 0.04 + blue: 0.98 + orientation : -90 + children : + 1: + id : Leg31Joint + type : ActiveHinge + params: + red: 0.98 + green: 0.98 + blue: 0.98 + children : + 1: + id : Leg31 + type : FixedBrick + params: + red: 0.04 + green: 0.04 + blue: 0.98 + orientation : 0 +brain: + type: bo-cpg + controller: + use_frame_of_reference: 0 + load_brain: "" + reset_neuron_state_bool: "true" + reset_neuron_random: "false" + abs_output_bound: 1.0 + signal_factor_all: 4.0 + signal_factor_mid: 2.5 + signal_factor_left_right: 2.5 + range_lb: -1.5 + range_ub: 1.5 + init_neuron_state: 0.707 + learner: + n_init_samples: 20 + init_method: "LHS" + kernel_noise: 0.00000001 + kernel_optimize_noise: "false" + kernel_sigma_sq: 0.001 + kernel_l: 0.1 + kernel_squared_exp_ard_k: 4 + acqui_gpucb_delta: 0.1 + acqui_ucb_alpha: 2.0 + acqui_ei_jitter: 0 + acquisition_function: "UCB" + meta: + run_analytics: "true" + n_learning_iterations: 980 + n_cooldown_iterations: 0 + reset_robot_position: "true" + evaluation_rate: 60 + output_directory: "" + verbose: 1 + startup_time: 0 diff --git a/experiments/bo_learner/yaml/spider13.yaml b/experiments/bo_learner/yaml/spider13.yaml index 5fed980f4e..a535323cd2 100644 --- a/experiments/bo_learner/yaml/spider13.yaml +++ b/experiments/bo_learner/yaml/spider13.yaml @@ -231,7 +231,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -239,20 +239,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 2.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: robot_size: 26 - run_analytics: "true" + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git 
a/experiments/bo_learner/yaml/spider17.yaml b/experiments/bo_learner/yaml/spider17.yaml index 2cdae21c26..81848303dd 100644 --- a/experiments/bo_learner/yaml/spider17.yaml +++ b/experiments/bo_learner/yaml/spider17.yaml @@ -303,7 +303,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -311,20 +311,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: robot_size: 34 - run_analytics: "true" + run_analytics: "false" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/bo_learner/yaml/spider9.yaml b/experiments/bo_learner/yaml/spider9.yaml index 7e153dceef..b31b6ab97c 100644 --- a/experiments/bo_learner/yaml/spider9.yaml +++ b/experiments/bo_learner/yaml/spider9.yaml @@ -155,7 +155,7 @@ brain: signal_factor_all: 4.0 signal_factor_mid: 2.5 signal_factor_left_right: 2.5 - range_lb: -1.0 + range_lb: -1.5 range_ub: 1.5 init_neuron_state: 0.707 learner: @@ -163,20 +163,20 @@ brain: init_method: "LHS" kernel_noise: 0.00000001 kernel_optimize_noise: "false" - kernel_sigma_sq: 0.222 - kernel_l: 0.55 + kernel_sigma_sq: 1.0 + kernel_l: 0.2 kernel_squared_exp_ard_k: 4 acqui_gpucb_delta: 0.5 - acqui_ucb_alpha: 0.44 + acqui_ucb_alpha: 3.0 acqui_ei_jitter: 0 acquisition_function: "UCB" meta: robot_size: 18 run_analytics: "true" n_learning_iterations: 1450 - n_cooldown_iterations: 1 + n_cooldown_iterations: 0 reset_robot_position: "false" - evaluation_rate: 60 + evaluation_rate: 30 output_directory: "" - verbose: 0 + verbose: 1 startup_time: 0 diff --git a/experiments/examples/manager.py b/experiments/examples/manager.py index 3dacc321c2..b7434726d5 100755 --- a/experiments/examples/manager.py +++ b/experiments/examples/manager.py @@ -27,6 +27,7 @@ async def run(): robot = revolve_bot.RevolveBot() robot.load_file("experiments/examples/yaml/spider.yaml") robot.update_substrate() + # robot.save_file("./spider.sdf", conf_type='sdf') # Connect to the simulator and pause world = await World.create(settings) diff --git a/pyrevolve/revolve_bot/brain/bo_cpg.py b/pyrevolve/revolve_bot/brain/bo_cpg.py index 7f8a28c140..915d62cef0 100644 --- a/pyrevolve/revolve_bot/brain/bo_cpg.py +++ b/pyrevolve/revolve_bot/brain/bo_cpg.py @@ -35,6 +35,9 @@ def __init__(self): self.acqui_ucb_alpha = None self.acqui_ei_jitter = None self.n_init_samples = None + self.gaussian_step_size = None + self.covrate = None + self.mutrate = None # Various self.robot_size = None @@ -91,6 +94,9 @@ def learner_sdf(self): 'acqui_gpucb_delta': str(self.acqui_gpucb_delta), 'acqui_ucb_alpha': str(self.acqui_ucb_alpha), 'acqui_ei_jitter': str(self.acqui_ei_jitter), + 'gaussian_step_size': str(self.gaussian_step_size), + 'covrate': str(self.covrate), + 'mutrate': str(self.mutrate), }) def controller_sdf(self): diff --git a/thirdparty/galgo/.DS_Store b/thirdparty/galgo/.DS_Store new file mode 100644 index 0000000000..195b592d16 Binary files /dev/null and b/thirdparty/galgo/.DS_Store differ diff --git a/thirdparty/galgo/galgo/Chromosome.hpp b/thirdparty/galgo/galgo/Chromosome.hpp new file 
mode 100755 index 0000000000..d2296ba1b6 --- /dev/null +++ b/thirdparty/galgo/galgo/Chromosome.hpp @@ -0,0 +1,599 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef CHROMOSOME_HPP +#define CHROMOSOME_HPP + +namespace galgo +{ + + //================================================================================================= + + template class Chromosome + { + public: + // constructor + Chromosome(const GeneticAlgorithm &ga); + + // copy constructor + Chromosome(const Chromosome< T > &rhs); + + // create new chromosome + void create(int num_in_pop, Eigen::VectorXd indiv); + + // create new chromosome +// void create(); + + // initialize chromosome + void initialize(); + + // evaluate chromosome + void evaluate(); + + // evaluate a generation-0 chromosome + void evaluate0gen(Eigen::VectorXd observations); + + // get the number of generations + int get_ga_nogen() const; + + // reset chromosome + void reset(); + + // set or replace kth gene by a new one + void setGene(int k); + + // initialize or replace kth gene by a known value + void initGene(int k, T value); + + // add bit to chromosome + void addBit(char bit); + + // initialize or replace an existing chromosome bit + void setBit(char bit, int pos); + + // flip an existing chromosome bit + void flipBit(int pos); + + // get chromosome bit + char getBit(int pos) const; + + // initialize or replace a portion of bits with a portion of another chromosome + void setPortion( + const Chromosome< T > &x, + int start, + int end); + + // initialize or replace a portion of bits with a portion of another chromosome + void setPortion( + const Chromosome< T > &x, + int start); + + // get parameter value(s) from chromosome + const std::vector< T > &getParam() const; + + // get objective function result + const std::vector< double > &getResult() const; + + // get the total sum of all objective function(s) result + double getTotal() const; + + // get constraint value(s) + const std::vector< double > getConstraint() const; + + // return chromosome size in number of bits + int size() const; + + // return number of chromosome bits to mutate + double mutrate() const; + + const MutationInfo &mutinfo() const; + + double recombination_ratio() const; + + // return number of genes in chromosome + int nbgene() const; + + // return the number of the generation this chromosome belongs to + int nogen() const; + + // return lower bound(s) + const std::vector< T > &lowerBound() const; + + // return upper bound(s) + const std::vector< T > &upperBound() const; + + T get_value(int k) const; + + double get_sigma(int k) const; + + long get_sigma_iteration(int k) const; + + void sigma_update( + int k, + double new_sigma) + { + _sigma_iteration[k]++; + _sigma[k] = new_sigma; + } + + private: + std::vector< T > param; // estimated parameter(s) + std::vector< double > result; // chromosome objective function(s) result + std::string chr; // string of bits representing chromosome + const GeneticAlgorithm *ptr = nullptr; //pointer to genetic algorithm + ///******* new **********/// + std::ifstream infile; + std::ifstream numgens; +// std::vector< double > indiv; + std::vector< double > _sigma; // stddev per parameter + std::vector< long > _sigma_iteration; 
//number of time _sigma was updated + + int clusterCount = 10; //pop_size + int dimens = 20; //dimensions + + public: + double fitness; // chromosome fitness, objective function(s) result that can be modified (adapted to constraint(s), set to positive values, etc...) + + private: + double total; // total sum of objective function(s) result + int chrsize; // chromosome size (in number of bits) + int numgen; // numero of generation + }; + + /*-------------------------------------------------------------------------------------------------*/ + + // constructor + template + Chromosome< T >::Chromosome(const GeneticAlgorithm &ga) + { + param.resize(ga.nbparam); + _sigma.resize(ga.nbparam); + _sigma_iteration.resize(ga.nbparam); + + ptr = &ga; + chrsize = ga.nbbit; + numgen = ga.nogen; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // copy constructor + template Chromosome< T >::Chromosome(const Chromosome< T > &rhs) + { + param = rhs.param; + _sigma = rhs._sigma; + _sigma_iteration = rhs._sigma_iteration; + + result = rhs.result; + chr = rhs.chr; + ptr = rhs.ptr; + + // re-initializing fitness to its original value + fitness = rhs.total; + total = rhs.total; + chrsize = rhs.chrsize; + numgen = rhs.numgen; + } + + /*-------------------------------------------------------------------------------------------------*/ + // create new chromosome //called in row 171 population.hpp + template inline void Chromosome< T >::create(int num_in_pop, Eigen::VectorXd indiv) + { + chr.clear(); + // pass MaxIndivPerCluster to param + int j(0); + for (const auto &x : ptr->param) // for(0 : dimensions) + { + param[j] = indiv[j]; +// std::cout << param[j] << ", "; + + T newvalue = (T) indiv[j]; + initGene(j, newvalue); + + _sigma[j] = 0.0; + _sigma_iteration[j] = 0; + j++; + } + } + + // create new chromosome +// template +// inline void Chromosome::create() +// { +// chr.clear(); +// +// int i(0); +// for (const auto& x : ptr->param) +// { +// // encoding parameter random value +// std::string str = x->encode(); +// chr.append(str); +// +// _sigma[i] = 0.0; +// _sigma_iteration[i] = 0; +// +// i++; +// } +// } + /*-------------------------------------------------------------------------------------------------*/ + + // initialize chromosome (from known parameter values) + template inline void Chromosome< T >::initialize() + { + chr.clear(); + + int i(0); + for (const auto &x : ptr->param) + { + // encoding parameter initial value + std::string str = x->encode(ptr->initialSet[i++]); + chr.append(str); + + //_sigma[i] = 0.0; + //_sigma_iteration[i] = 0; + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // evaluate chromosome fitness + template inline void Chromosome< T >::evaluate() + { + int i(0); + for (const auto &x : ptr->param) + { + // decoding chromosome: converting chromosome string into a real value + param[i] = x->decode(chr.substr(ptr->idx[i], x->size())); + std::cout << " param[" << i << "]: " << param[i]; + i++; + } + + // computing objective result(s) + result = ptr->Objective(param); + + // computing sum of all results (in case there is not only one objective functions) + total = std::accumulate(result.begin(), result.end(), 0.0); + + // initializing fitness to this total + fitness = total; + std::cout << " fitness: " << fitness << std::endl; + + std::ofstream outvalue; + outvalue.open("../value.txt", std::ios::app); + outvalue << std::fixed << fitness << std::endl; + 
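+ // NB: each call appends one fitness value to ../value.txt (opened with std::ios::app), so the file accumulates results across the whole run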
} + + /*-------------------------------------------------------------------------------------------------*/ + // evaluate chromosome fitness for 0 generations + template <typename T> + inline void Chromosome< T >::evaluate0gen(Eigen::VectorXd observation) + { + /// removed passing the value to param, with clustering pop as instead + // computing objective result(s) std::vector< double > result; + //result = ptr->Objective(param); //std::vector Objective + std::vector<double> vec(observation.data(), observation.data() + observation.size()); + result.push_back(vec[0]); + // computing sum of all results (in case there is not only one objective functions) + total = result[0]; //std::accumulate(result.begin(), result.end(), 0.0); double total + + // initializing fitness to this total + fitness = total; //double fitness +// std::cout << "result[0]: " << result[0] +// << " total: " << total +// << " fitness: " << fitness << std::endl; +// +// std::ofstream outvalue; +// outvalue.open("../value.txt", std::ios::app); +// outvalue << std::fixed << fitness << std::endl; + } + /*-------------------------------------------------------------------------------------------------*/ + // get the number of generations + template <typename T> + inline int Chromosome< T >::get_ga_nogen() const + { + return ptr->nogen; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // reset chromosome + template <typename T> inline void Chromosome< T >::reset() + { + chr.clear(); + result.clear(); // result is a std::vector, so clear it (scalar assignment would not compile) + total = 0.0; + fitness = 0.0; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // set or replace kth gene by a new one + template <typename T> inline void Chromosome< T >::setGene(int k) + { +#ifndef NDEBUG + if (k < 0 || k >= ptr->nbparam) + { + throw std::invalid_argument( + "Error: in galgo::Chromosome<T>::setGene(int), argument cannot be outside interval [0,nbparam-1], please amend."); + } +#endif + + // generating a new gene + std::string s = ptr->param[k]->encode(); + + // adding or replacing gene in chromosome + chr.replace(ptr->idx[k], s.size(), s, 0, s.size()); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // initialize or replace kth gene by a known value + template <typename T> + inline void Chromosome< T >::initGene( int k, T x) + { +#ifndef NDEBUG + if (k < 0 || k >= ptr->nbparam) + { + throw std::invalid_argument( + "Error: in galgo::Chromosome<T>::initGene(int), first argument cannot be outside interval [0,nbparam-1], please amend."); + } +#endif + + // encoding gene + std::string s = ptr->param[k]->encode(x); + + // adding or replacing gene in chromosome + chr.replace(ptr->idx[k], s.size(), s, 0, s.size()); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // add chromosome bit to chromosome (when constructing a new one) + template <typename T> inline void Chromosome< T >::addBit(char bit) + { + chr.push_back(bit); + +#ifndef NDEBUG + if (chr.size() > chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::addBit(char), exceeding chromosome size."); + } +#endif + } + + /*-------------------------------------------------------------------------------------------------*/ + + // initialize or replace an existing chromosome bit + template <typename T> + inline void Chromosome< T >::setBit( char bit, int pos) + { +#ifndef NDEBUG + if (pos >= chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::setBit(char, int), second argument
cannot be equal or greater than chromosome size."); + } +#endif + + std::stringstream ss; + std::string str; + ss << bit; + ss >> str; + chr.replace(pos, 1, str); +// std::cout << chr << "\n"; // debug output, disabled + } + + /*-------------------------------------------------------------------------------------------------*/ + + // flip an existing chromosome bit + template <typename T> inline void Chromosome< T >::flipBit(int pos) + { +#ifndef NDEBUG + if (pos >= chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::flipBit(int), argument cannot be equal or greater than chromosome size."); + } +#endif + + if (chr[pos] == '0') + { + chr.replace(pos, 1, "1"); + } + else + { + chr.replace(pos, 1, "0"); + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get a chromosome bit + template <typename T> inline char Chromosome< T >::getBit(int pos) const + { +#ifndef NDEBUG + if (pos >= chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::getBit(int), argument cannot be equal or greater than chromosome size."); + } +#endif + + return chr[pos]; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // initialize or replace a portion of bits with a portion of another chromosome (from position start to position end included) + template <typename T> + inline void Chromosome< T >::setPortion( const Chromosome< T > &x, int start, int end) + { +#ifndef NDEBUG + if (start > chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::setPortion(const Chromosome<T>&, int, int), second argument cannot be greater than chromosome size."); + } +#endif + ///replace chr(start, end-start+1) with x.chr(start, end-start+1) + chr.replace(start, end - start + 1, x.chr, start, end - start + 1); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // initialize or replace a portion of bits with a portion of another chromosome (from position start to the end of the chromosome) + template <typename T> + inline void Chromosome< T >::setPortion( const Chromosome< T > &x, int start) + { +#ifndef NDEBUG + if (start > chrsize) + { + throw std::out_of_range( + "Error: in galgo::Chromosome<T>::setPortion(const Chromosome<T>&, int), second argument cannot be greater than chromosome size."); + } +#endif + ///replace chr(start, chrsize) with x.chr(start, x.chrsize) + chr.replace(start, chrsize, x.chr, start, x.chrsize); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get parameter value(s) from chromosome + template <typename T> + inline const std::vector< T > &Chromosome< T >::getParam() const + { + return param; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get objective function result + template <typename T> + inline const std::vector< double > &Chromosome< T >::getResult() const + { + return result; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get the total sum of all objective function(s) result + template <typename T> inline double Chromosome< T >::getTotal() const + { + return total; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get constraint value(s) for this chromosome + template <typename T> + inline const std::vector< double > Chromosome< T >::getConstraint() const + { + return ptr->Constraint(param); + } + + 
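+ // Illustration (editor's sketch, not part of the patch): setPortion above is a thin wrapper over + // std::string::replace on the bit string. With two hypothetical 8-bit chromosomes and the + // inclusive range [2,5]: + // + // std::string a = "11111111", b = "00000000"; + // int start = 2, end = 5; + // a.replace(start, end - start + 1, b, start, end - start + 1); // a == "11000011" + //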
/*-------------------------------------------------------------------------------------------------*/ + + // return chromosome size in number of bits + template inline int Chromosome< T >::size() const + { + return chrsize; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return mutation rate + template inline double Chromosome< T >::mutrate() const + { + return ptr->mutrate; + } + + template + inline double Chromosome< T >::recombination_ratio() const + { + return ptr->recombination_ratio; + } + + template + inline const MutationInfo &Chromosome< T >::mutinfo() const + { + return ptr->mutinfo; + } + + + /*-------------------------------------------------------------------------------------------------*/ + + // return number of genes in chromosome + template inline int Chromosome< T >::nbgene() const + { + return ptr->nbparam; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return numero of generation this chromosome belongs to + template inline int Chromosome< T >::nogen() const + { + return numgen; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return lower bound(s) + template + inline const std::vector< T > &Chromosome< T >::lowerBound() const + { + return ptr->lowerBound; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return upper bound(s) + template + inline const std::vector< T > &Chromosome< T >::upperBound() const + { + return ptr->upperBound; + } + + + template T Chromosome< T >::get_value(int k) const + { +#ifndef NDEBUG + if (k < 0 || k >= ptr->nbparam) + { + throw std::invalid_argument( + "Error: in galgo::Chromosome::get_value(int), first argument cannot be outside interval [0,nbparam-1], please amend."); + } +#endif + + auto &x = ptr->param[k]; + return x->decode(chr.substr(ptr->idx[k], x->size())); + } + + + template double Chromosome< T >::get_sigma(int k) const + { + return _sigma[k]; + } + + template long Chromosome< T >::get_sigma_iteration(int k) const + { + return _sigma_iteration[k]; + } + //================================================================================================= + +} + +#endif diff --git a/thirdparty/galgo/galgo/Converter.hpp b/thirdparty/galgo/galgo/Converter.hpp new file mode 100755 index 0000000000..b6ad0e86bd --- /dev/null +++ b/thirdparty/galgo/galgo/Converter.hpp @@ -0,0 +1,40 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. 
+// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef CONVERTER_HPP +#define CONVERTER_HPP + +namespace galgo { + +//================================================================================================= + +// convert unsigned long long integer to binary string +std::string GetBinary(uint64_t value) +{ + std::bitset<sizeof(uint64_t) * CHAR_BIT> bits(value); + // NB: CHAR_BIT = number of bits in char usually 8 but not always on older machines + return bits.to_string(); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// convert binary string to unsigned long long integer +uint64_t GetValue(const std::string& s) +{ + uint64_t value, x = 0; + for (std::string::const_iterator it = s.begin(), end = s.end(); it != end; ++it) { + x = (x << 1) + (*it - '0'); + } + memcpy(&value, &x, sizeof(uint64_t)); + + return value; +} + +//================================================================================================= + +} + +#endif diff --git a/thirdparty/galgo/galgo/Evolution.hpp b/thirdparty/galgo/galgo/Evolution.hpp new file mode 100755 index 0000000000..09dff34015 --- /dev/null +++ b/thirdparty/galgo/galgo/Evolution.hpp @@ -0,0 +1,1075 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef EVOLUTION_HPP +#define EVOLUTION_HPP + +// In this header, the user can define his own selection, cross-over, mutation and adaptation to +// constraint(s) methods by respecting the function declaration template + +//================================================================================================= + +// SELECTION METHODS + +/*-------------------------------------------------------------------------------------------------*/ + +// proportional roulette wheel selection +template <typename T> void RWS(galgo::Population< T > &x) +{ + // adjusting all fitness to positive values + x.adjustFitness(); + + // computing fitness sum + double fitsum = x.getSumFitness(); + + // selecting mating population + for (int i = 0, end = x.matsize(); i < end; ++i) + { + int j = 0; + if (fitsum > 0.0) + { + // generating a random fitness sum in [0,fitsum) + double fsum = galgo::uniform< double >(0.0, fitsum); + + while (fsum >= 0.0) + { +#ifndef NDEBUG + if (j == x.popsize()) + { + throw std::invalid_argument( + "Error: in RWS(galgo::Population<T>&) index j cannot be equal to population size."); + } +#endif + fsum -= x(j)->fitness; + j++; + } + } + else + { + j = 1; + } + + // selecting element + x.select(j - 1); + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// stochastic universal sampling selection +template <typename T> void SUS(galgo::Population< T > &x) +{ + // adjusting all fitness to positive values + x.adjustFitness(); + + // computing fitness sum + double fitsum = x.getSumFitness(); + + int matsize = x.matsize(); + + // computing interval size + double dist = fitsum / matsize; + + if (fitsum == 0.0) + { + dist = 1.0; // BUG to fix + } + + // initializing pointer + double ptr = galgo::uniform< double >(0.0, dist); + + // selecting mating population + for (int i = 0; i < matsize; ++i) 
+ { + + int j = 0; + double fsum = 0; + + while (fsum <= ptr) + { + //#ifndef NDEBUG + if (j == x.popsize()) + { + //throw std::invalid_argument("Error: in SUS(galgo::Population&) index j cannot be equal to population size."); + j = 1; + break; + } + //#endif + if (j < matsize) + { + fsum += x(j)->fitness; + if (j < matsize - 1) + { + j++; + } + else + { + // BUG to fix + j = 1; + break; + } + } + else + { + // BUG to fix + j = 1; + break; + } + } + + // selecting element + x.select(j - 1); + + // incrementing pointer + ptr += dist; + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// classic linear rank-based selection +template void RNK(galgo::Population< T > &x) +{ + int popsize = x.popsize(); + static std::vector< int > rank(popsize); + static int ranksum; + + // this will only be run at the first generation + if (x.nogen() == 1) + { + int n = popsize + 1; + // generating ranks from highest to lowest + std::generate_n(rank.begin(), popsize, [&n]() -> int + { return --n; }); + // computing sum of ranks + ranksum = int(.5 * popsize * (popsize + 1)); + } + + // selecting mating population + for (int i = 0, end = x.matsize(); i < end; ++i) + { + // generating a random rank sum in [1,ranksum) + int rsum = galgo::uniform< int >(1, ranksum); + + int j = 0; + while (rsum > 0) + { +#ifndef NDEBUG + if (j == popsize) + { + throw std::invalid_argument( + "Error: in RNK(galgo::Population&) index j cannot be equal to population size."); + } +#endif + rsum -= rank[j]; + j++; + } + // selecting element + x.select(j - 1); + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// linear rank-based selection with selective pressure +template void RSP(galgo::Population< T > &x) +{ + int popsize = x.popsize(); + static std::vector< double > rank(popsize); + static double ranksum; + + // this will only be run at the first generation + if (x.nogen() == 1) + { + // initializing ranksum + ranksum = 0.0; + // generating ranks from highest to lowest + for (int i = 0; i < popsize; ++i) + { + rank[i] = 2 - x.SP() + 2 * (x.SP() - 1) * (popsize - i) / popsize; + ranksum += rank[i]; + } + } + + // selecting mating population + for (int i = 0, end = x.matsize(); i < end; ++i) + { + // generating a random rank sum in [0,ranksum) + double rsum = galgo::uniform< double >(0.0, ranksum); + + int j = 0; + while (rsum >= 0.0) + { +#ifndef NDEBUG + if (j == popsize) + { + throw std::invalid_argument( + "Error: in RSP(galgo::Population&) index j cannot be equal to population size."); + } +#endif + rsum -= rank[j]; + j++; + } + // selecting element + x.select(j - 1); + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// tournament selection +template void TNT(galgo::Population< T > &x) +{ + int popsize = x.popsize(); + int tntsize = x.tntsize(); + + // selecting mating population + for (int i = 0, end = x.matsize(); i < end; ++i) //matsize() = popsize + { + // selecting randomly a first element + int bestIdx = galgo::uniform< int >(0, popsize); + // the operator() access element in current population at position pos + double bestFit = x(bestIdx)->fitness; + + // starting k-tournament,tntsize = k = 2/4 + for (int j = 1; j < tntsize; ++j) + { + int idx = galgo::uniform< int >(0, popsize); + //the operator() access element in current population at position pos + double fit = x(idx)->fitness; + if (fit > bestFit) + { + bestFit = fit; + bestIdx = 
idx; + } + } + // selecting element at position bestIdx in current population and copy it into mating population + x.select(bestIdx); //matpop[matidx] = curpop[pos]; + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// transform ranking selection +template void TRS(galgo::Population< T > &x) +{ + static double c; + // (re)initializing when running new GA + if (x.nogen() == 1) + { + c = 0.2; + } + int popsize = x.popsize(); + + // generating a random set of popsize values on [0,1) + std::vector< double > r(popsize); + std::for_each(r.begin(), r.end(), [](double &z) -> double + { return galgo::proba(galgo::rng); }); + + // sorting them from highest to lowest + std::sort(r.begin(), r.end(), []( + double z1, + double z2) -> bool + { return z1 > z2; }); + // transforming population fitness + auto it = x.begin(); + std::for_each(r.begin(), r.end(), [&it, popsize](double z) -> void + { + (*it)->fitness = ceil((popsize - popsize * exp(-c * z)) / (1 - exp(-c))); + it++; + }); + + // updating c for next generation + c = c + 0.1; // arithmetic transition + //c = c * 1.1; // geometric transition + // computing fitness sum + double fitsum = x.getSumFitness(); + + // selecting mating population + for (int i = 0, end = x.matsize(); i < end; ++i) + { + int j = 0; + if (fitsum > 0.0) + { + // generating a random fitness sum in [0,fitsum) + double fsum = galgo::uniform< double >(0, fitsum); + while (fsum >= 0) + { +#ifndef NDEBUG + if (j == popsize) + { + throw std::invalid_argument( + "Error: in TRS(galgo::Population&) index j cannot be equal to population size."); + } +#endif + fsum -= x(j)->fitness; + j++; + } + } + else + { + j = 1; + } + + // selecting element + x.select(j - 1); + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// CROSS-OVER METHODS + +template void transmit_sigma( + double r, + const galgo::Chromosome< T > &chrmat1, + const galgo::Chromosome< T > &chrmat2, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + for (int i = 0; i < chr1->nbgene(); i++) + { + chr1->sigma_update(i, + (r * chrmat1.get_sigma(i)) + ((1.0 - r) * chrmat2 + .get_sigma(i))); + } + for (int i = 0; i < chr2->nbgene(); i++) + { + chr2->sigma_update(i, + (r * chrmat2.get_sigma(i)) + ((1.0 - r) * chrmat1 + .get_sigma(i))); + } +} + +template void transmit_sigma( + const galgo::Chromosome< T > &chrmat1, + galgo::Chromosome< T > &chr1) +{ + for (int i = 0; i < chr1.nbgene(); i++) + { + chr1.sigma_update(i, chrmat1.get_sigma(i)); + } +} + +/*-------------------------------------------------------------------------------------------------*/ +template void RealValuedSimpleArithmeticRecombination( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + }// find not same parents + } + + // choosing randomly a position for cross-over + int pos = galgo::uniform< int >(0, chr1->nbgene()); + double r = chr1->recombination_ratio(); + // std::cout << "pos: " << pos << " r: " << r << " "; + + // *x[idx1] is operator[](int pos) is access element in mating population at position pos + //operator [] is a element function of class population in row 267 population.hpp + const galgo::Chromosome< T > &chrmat1 = 
*x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + for (int i = 0; i < pos; i++) + { + chr1->initGene(i, chrmat1.get_value(i)); + } + for (int i = pos; i < chr1->nbgene(); i++) + { + // std::cout << "chr1value: " << chrmat1.get_value(i) + // << " chr2value: " << chrmat2.get_value(i) + // << " chr1newXover: " + // << (r * chrmat2.get_value(i)) + ((1.0 - r) * chrmat1.get_value(i)) << " "; + chr1->initGene(i, (T)((r * chrmat2.get_value(i)) + ((1.0 - r) * chrmat1.get_value(i)))); + } + // std::cout << std::endl; + for (int i = 0; i < pos; i++) + { + chr2->initGene(i, chrmat2.get_value(i)); + } + for (int i = pos; i < chr2->nbgene(); i++) + { + chr2->initGene(i, (T)((r * chrmat1.get_value(i)) + ((1.0 - r) * chrmat2.get_value(i)))); + } + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + +template void RealValuedSingleArithmeticRecombination( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + } // find not unique parents + } + + // choosing randomly a position for cross-over + int pos = galgo::uniform< int >(0, chr1->nbgene()); + + double r = chr1->recombination_ratio(); + const galgo::Chromosome< T > &chrmat1 = *x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + for (int i = 0; i < chr1->nbgene(); i++) + { + chr1->initGene(i, chrmat1.get_value(i)); + } + chr1->initGene(pos, + (T)(r * chrmat2.get_value(pos) + (1.0 - r) * chrmat1 + .get_value(pos))); + + for (int i = 0; i < chr2->nbgene(); i++) + { + chr2->initGene(i, chrmat2.get_value(i)); + } + chr2->initGene(pos, + (T)(r * chrmat1.get_value(pos) + (1.0 - r) * chrmat2 + .get_value(pos))); + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + +template void RealValuedWholeArithmeticRecombination( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + } // find not unique parents + } + + double r = chr1->recombination_ratio(); + const galgo::Chromosome< T > &chrmat1 = *x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + for (int i = 0; i < chr1->nbgene(); i++) + { + chr1->initGene(i, + (T)(r * chrmat2.get_value(i) + (1.0 - r) * chrmat1 + .get_value(i))); + } + for (int i = 0; i < chr2->nbgene(); i++) + { + chr2->initGene(i, + (T)(r * chrmat1.get_value(i) + (1.0 - r) * chrmat2 + .get_value(i))); + } + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + + +// one-point random cross-over of 2 chromosomes +template void P1XO( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + } // find not unique parents + } + + // choosing randomly a position for cross-over + int pos = galgo::uniform< int >(0, chr1->size()); + + // 
transmitting portion of bits to new chromosomes + chr1->setPortion(*x[idx1], 0, pos); + chr2->setPortion(*x[idx2], 0, pos); + chr1->setPortion(*x[idx2], pos + 1); + chr2->setPortion(*x[idx1], pos + 1); + + double r = chr1->recombination_ratio(); + const galgo::Chromosome< T > &chrmat1 = *x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// two-point random cross-over of 2 chromosomes +template void P2XO( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + } // find not unique parents + } + + // choosing randomly 2 positions for cross-over + int pos1 = galgo::uniform< int >(0, chr1->size()); + int pos2 = galgo::uniform< int >(0, chr1->size()); + + // ordering these 2 random positions + int m = std::min(pos1, pos2); + int M = std::max(pos1, pos2); + + // transmitting portion of bits new chromosomes + chr1->setPortion(*x[idx1], 0, m); + chr2->setPortion(*x[idx2], 0, m); + chr1->setPortion(*x[idx2], m + 1, M); + chr2->setPortion(*x[idx1], m + 1, M); + chr1->setPortion(*x[idx1], M + 1); + chr2->setPortion(*x[idx2], M + 1); + + double r = chr1->recombination_ratio(); + const galgo::Chromosome< T > &chrmat1 = *x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// uniform random cross-over of 2 chromosomes +template void UXO( + const galgo::Population< T > &x, + galgo::CHR< T > &chr1, + galgo::CHR< T > &chr2) +{ + // choosing randomly 2 chromosomes from mating population + int idx1 = galgo::uniform< int >(0, x.matsize()); + int idx2 = galgo::uniform< int >(0, x.matsize()); + if (x.matsize() >= 2) + { + while (idx1 == idx2) + { + idx2 = galgo::uniform< int >(0, x.matsize()); + } // find not unique parents + } + + for (int j = 0; j < chr1->size(); ++j) + { + // choosing 1 of the 2 chromosomes randomly + if (galgo::proba(galgo::rng) < 0.50) + { + // adding its jth bit to new chromosome + chr1->addBit(x[idx1]->getBit(j)); + chr2->addBit(x[idx2]->getBit(j)); + } + else + { + // adding its jth bit to new chromosomes + chr1->addBit(x[idx2]->getBit(j)); + chr2->addBit(x[idx1]->getBit(j)); + } + } + + double r = chr1->recombination_ratio(); + const galgo::Chromosome< T > &chrmat1 = *x[idx1]; + const galgo::Chromosome< T > &chrmat2 = *x[idx2]; + + // Transmit sigma + transmit_sigma< T >(r, chrmat1, chrmat2, chr1, chr2); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// MUTATION METHODS + +/*-------------------------------------------------------------------------------------------------*/ + +// boundary mutation: replacing a chromosome gene by its lower or upper bound +template void BDM(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + // getting chromosome lower bound(s) + const std::vector< T > &lowerBound = chr->lowerBound(); + // getting chromosome upper bound(s) + const std::vector< T > 
&upperBound = chr->upperBound(); + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + // generating a random probability + if (galgo::proba(galgo::rng) < .5) + { + // replacing ith gene by lower bound + chr->initGene(i, lowerBound[i]); + } + else + { + // replacing ith gene by upper bound + chr->initGene(i, upperBound[i]); + } + } + } +} + +/*-------------------------------------------------------------------------------------------------*/ +// single point mutation: flipping a chromosome bit +template void SPM(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + // looping on chromosome bits + for (int i = 0; i < chr->size(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + // flipping ith bit + chr->flipBit(i); + } + } +} + + +template +void GAM_UncorrelatedOneStepSizeFixed(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + + double n = chr->nbgene(); + double tau = 1.0 / pow(n, 0.50); + + std::normal_distribution< double > distribution01(0.0, 1.0); + + // looping on number of genes (parameters) + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + T value = chr->get_value(i); + double sigma = chr->get_sigma(i); + + if (sigma < 0.001) // first time + { + sigma = chr->mutinfo()._sigma; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, sigma); + } + + double newsigma = sigma * std::exp(tau * distribution01(galgo::rng)); + if (newsigma < chr->mutinfo()._sigma_lowest) + { + newsigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, newsigma); + + double norm01 = distribution01(galgo::rng); + T step = (T)(newsigma * norm01); + + T newvalue = (T)(std::min< T >(std::max< T >(value + step, lowerBound[i]), + upperBound[i])); + chr->initGene(i, newvalue); + } + } +} + +template +void GAM_UncorrelatedOneStepSizeBoundary(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + + double n = (double)chr->nbgene(); + double tau = 1.0 / pow(n, 0.50); + + std::normal_distribution< double > distribution01(0.0, 1.0); + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + T value = chr->get_value(i); + double sigma = chr->get_sigma(i); + + if (sigma < 0.001) // first time + { + sigma = + (upperBound[i] - lowerBound[i]) * chr->mutinfo()._ratio_boundary; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, sigma); + } + + double newsigma = sigma * std::exp(tau * distribution01(galgo::rng)); + if (newsigma < chr->mutinfo()._sigma_lowest) + { + newsigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, newsigma); + + double norm01 = distribution01(galgo::rng); + T step = (T)(newsigma * norm01); + + T newvalue = (T)(std::min< T >(std::max< T >(value + step, lowerBound[i]), upperBound[i])); + chr->initGene(i, newvalue); + } + } +} + +template +void 
GAM_UncorrelatedNStepSize(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + + std::normal_distribution< double > distribution01(0.0, 1.0); + + double n = (double)chr->nbgene(); + double tau1 = 1.0 / pow(2.0 * n, 0.50); + double tau2 = 1.0 / pow(2.0 * pow(n, 0.50), 0.50); + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + T value = chr->get_value(i); + double sigma = chr->get_sigma(i); + + if (sigma < 0.001) // never copied from parent + { + sigma = chr->mutinfo()._sigma; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, sigma); + } + else + { + double factor1 = std::exp(tau1 * distribution01(galgo::rng)); + double factor2 = std::exp(tau2 * distribution01(galgo::rng)); + + double newsigma = sigma * factor1 * factor2; + if (newsigma < chr->mutinfo()._sigma_lowest) + { + newsigma = chr->mutinfo()._sigma_lowest; + } + + chr->sigma_update(i, newsigma); + sigma = newsigma; + } + + double norm01 = distribution01(galgo::rng); + T newvalue = (T)(std::min< T >(std::max< T >(value + (T)(sigma * norm01), + lowerBound[i]), + upperBound[i])); + chr->initGene(i, newvalue); + } + } +} + +template +void GAM_UncorrelatedNStepSizeBoundary(galgo::CHR< T > &chr, size_t current_iteration, double gaussian_step_size) +{ + double mutrate = chr->mutrate(); //mutrate = 1.0; + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + ////todo the adaptive sigma over iterations for higher accurate + ////sigma from small to very small and jump to big if no gain for long iters +// auto nogens = galgo::GeneticAlgorithm::nogen; +// double std_dev = 0.5 - std::sqrt(current_iteration)/100; +// double gaussian_step_size = 0.3; +// if(current_iteration > 400) +// { +// std_dev = 0.4 - 0.01 * std::sqrt(current_iteration - 400); +// int random_big_mutation = galgo::uniform(0,10); +// std::cout << "random_big_mutation: " << random_big_mutation << " "; +// if(random_big_mutation > 8) +// { +// //keep exploration each generation +// std_dev = 0.8; +// } +// } + std::normal_distribution< double > distribution01(0.0, gaussian_step_size); //ea:0.4, boea:0.2 + std::normal_distribution< double > distribution11(1.0, 0.5); + + double n = (double)chr->nbgene(); + double tau1 = 1.0 / pow(2.0 * n, 0.50); //5d (n=5): 0.3162277660 + double tau2 = 1.0 / pow(2.0 * pow(n, 0.50), 0.50); //5d: 0.4728708045 + + // looping on number of genes, nbgene(): dimensions + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) //mutrate = 1.0, 0.8; + { + T value = chr->get_value(i); + double sigma = chr->get_sigma(i); //stddev per parameter + + if (sigma < 0.001) // never copied from parent + { + sigma = (upperBound[i] - lowerBound[i]) * chr->mutinfo()._ratio_boundary; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, sigma); + } + else + { + double factor1 = std::exp(tau1 * distribution01(galgo::rng)); + double factor2 = std::exp(tau2 * distribution01(galgo::rng)); +// std::cout << "sigma-" << i << " "<< sigma << " factor1: " << factor1 +// << " factor2: " << factor2 << " "; + double 
newsigma = sigma * factor1 * factor2; + if (newsigma < chr->mutinfo()._sigma_lowest) + { + newsigma = chr->mutinfo()._sigma_lowest; + } + + chr->sigma_update(i, newsigma); + sigma = newsigma; + } + + double norm01 = distribution01(galgo::rng); +// T newvalue = (T)(std::min< T >(std::max< T >(value + (T)(sigma * norm01), lowerBound[i]), upperBound[i])); + + //// random when value lower/bigger than lowerBound/upperBound + T newvalue = (T) (value + (T)(sigma * norm01)); + + while (newvalue < lowerBound[i]) + { + newvalue = distribution01(galgo::rng); //random when value lower than lowerBound + } + while (newvalue > upperBound[i]) + { + newvalue = distribution11(galgo::rng); + } + + chr->initGene(i, newvalue); + } + } +} + +/*-------------------------------------------------------------------------------------------------*/ +// Gaussian mutation: replacing a chromosome gene by near guassian value +template +void GAM_sigma_adapting_per_generation(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + + std::normal_distribution< double > distribution01(0.0, 1.0); + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + T value = chr->get_value(i); + double sigma = ((double)(upperBound[i] - lowerBound[i])) * chr->mutinfo()._ratio_boundary; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + + double norm01; + + // sigma decreasing blindly with number generation produced + std::cout << "chr->nogen(): " << chr->nogen() << std::endl; + for (int z = 1; z < chr->nogen() / 2; z++) + { + norm01 = distribution01(galgo::rng); + sigma = std::max(double(0), sigma * exp(norm01)); + } + + std::normal_distribution< double > distribution((double)value, sigma); + double norm = distribution(galgo::rng); + T newvalue = (T)(std::min(std::max((T)norm, lowerBound[i]), upperBound[i])); + chr->initGene(i, newvalue); + } + } +} +/*-------------------------------------------------------------------------------------------------*/ + + +/*-------------------------------------------------------------------------------------------------*/ +// Gaussian mutation: replacing a chromosome gene by near gaussian value +template void GAM_sigma_adapting_per_mutation(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + const std::vector< T > &lowerBound = chr->lowerBound(); + const std::vector< T > &upperBound = chr->upperBound(); + + std::normal_distribution< double > distribution01(0.0, 1.0); + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + T value = chr->get_value(i); + double sigma = chr->get_sigma(i); + + if (sigma < 0.001) // never copied from parent + { + sigma = ((double)(upperBound[i] - lowerBound[i])) * chr->mutinfo() + ._ratio_boundary; + if (sigma < chr->mutinfo()._sigma_lowest) + { + sigma = chr->mutinfo()._sigma_lowest; + } + chr->sigma_update(i, sigma); + } + + std::normal_distribution< double > distribution((double)value, sigma); + double norm = distribution(galgo::rng); + T new_value = + (T)(std::min(std::max((T)norm, lowerBound[i]), upperBound[i])); + chr->initGene(i, new_value); + } + } +} 
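+/*-------------------------------------------------------------------------------------------------*/ +// Illustration (editor's sketch, not part of the patch): the self-adaptation rule shared by the +// GAM_* operators above, reduced to a single gene. The helper name and the clamping of +// out-of-range values are assumptions; GAM_UncorrelatedNStepSizeBoundary instead resamples values +// that leave [lb,ub]. +// +// #include <algorithm> +// #include <cmath> +// #include <random> +// +// double mutate_gene(double x, double &sigma, double lb, double ub, std::mt19937 &rng) +// { +//     std::normal_distribution<double> n01(0.0, 1.0); +//     const double n = 1.0; // one gene in this sketch +//     const double tau1 = 1.0 / std::sqrt(2.0 * n); // "global" learning rate +//     const double tau2 = 1.0 / std::sqrt(2.0 * std::sqrt(n)); // per-gene learning rate +//     // lognormal self-adaptation of the step size, clamped from below like _sigma_lowest +//     sigma = std::max(0.001, sigma * std::exp(tau1 * n01(rng)) * std::exp(tau2 * n01(rng))); +//     // gaussian step, clamped to the gene's bounds +//     return std::min(ub, std::max(lb, x + sigma * n01(rng))); +// }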
+/*-------------------------------------------------------------------------------------------------*/ + + +// uniform mutation: replacing a chromosome gene by a new one +template void UNM(galgo::CHR< T > &chr) +{ + double mutrate = chr->mutrate(); + if (mutrate == 0.0) + { + return; + } + + // looping on number of genes + for (int i = 0; i < chr->nbgene(); ++i) + { + // generating a random probability + if (galgo::proba(galgo::rng) <= mutrate) + { + // replacing ith gene by a new one + chr->setGene(i); + } + } +} + +/*-------------------------------------------------------------------------------------------------*/ + +// ADAPTATION TO CONSTRAINT(S) METHODS + +/*-------------------------------------------------------------------------------------------------*/ + +// adapt population to genetic algorithm constraint(s) +template void DAC(galgo::Population< T > &x) +{ + // getting worst population objective function total result + double worstTotal = x.getWorstTotal(); + + for (auto it = x.begin(), end = x.end(); it != end; ++it) + { + // computing element constraint value(s) + const std::vector< double > &cst = (*it)->getConstraint(); + + // adapting fitness if any constraint violated + if (std::any_of(cst.cbegin(), cst.cend(), [](double x) -> bool + { return x >= 0.0; })) + { + (*it)->fitness = + worstTotal - std::accumulate(cst.cbegin(), cst.cend(), 0.0); + } + } +} + +//================================================================================================= + +#endif diff --git a/thirdparty/galgo/galgo/Galgo.hpp b/thirdparty/galgo/galgo/Galgo.hpp new file mode 100755 index 0000000000..553cf21055 --- /dev/null +++ b/thirdparty/galgo/galgo/Galgo.hpp @@ -0,0 +1,90 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. 
+// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef GALGO_H +#define GALGO_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/*-------------------------------------------------------------------------------------------------*/ + +namespace galgo { + +// forward declarations +template <typename T> +struct ConfigInfo; + +template <typename T> +struct MutationInfo; + +template <typename T> +class BaseParameter; + +template <typename T, int N> +class Parameter; + +template <typename T> +class GeneticAlgorithm; + +template <typename T> +class Population; + +template <typename T> +class Chromosome; + +// convenient typedefs +template <typename T> +using CHR = std::shared_ptr<Chromosome<T>>; + +template <typename T> +using PAR = std::unique_ptr<BaseParameter<T>>; + +template <typename T, int... N> +using TUP = std::tuple<const Parameter<T,N>&...>; + +} + +/*-------------------------------------------------------------------------------------------------*/ + +#ifdef _OPENMP + #include <omp.h> + // getting maximum number of threads available + static const int MAX_THREADS = omp_get_max_threads(); +#endif + +/*-------------------------------------------------------------------------------------------------*/ + +#include "Randomize.hpp" +#include "Converter.hpp" +#include "Parameter.hpp" +#include "Evolution.hpp" +#include "Chromosome.hpp" +#include "Population.hpp" +#include "GeneticAlgorithm.hpp" + +//================================================================================================= + +#endif + diff --git a/thirdparty/galgo/galgo/GeneticAlgorithm.hpp b/thirdparty/galgo/galgo/GeneticAlgorithm.hpp new file mode 100755 index 0000000000..1150921fc9 --- /dev/null +++ b/thirdparty/galgo/galgo/GeneticAlgorithm.hpp @@ -0,0 +1,540 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license.
+// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef GENETICALGORITHM_HPP +#define GENETICALGORITHM_HPP + +namespace galgo { + +enum class MutationType { + MutationSPM, + MutationBDM, + MutationUNM, + MutationGAM_UncorrelatedOneStepSizeFixed, + MutationGAM_UncorrelatedOneStepSizeBoundary, + MutationGAM_UncorrelatedNStepSize, + MutationGAM_UncorrelatedNStepSizeBoundary, + MutationGAM_sigma_adapting_per_generation, + MutationGAM_sigma_adapting_per_mutation, +}; + +template +struct MutationInfo +{ + MutationInfo() : + _type(MutationType::MutationSPM), + _sigma(1.0), + _ratio_boundary(1.0 / 10.0), //default: 1.0/6.0 + _sigma_lowest(0.001) // default: 0.0001 + { + } + + MutationType _type; + double _sigma; + double _ratio_boundary; + double _sigma_lowest; +}; + +template +struct ConfigInfo +{ + ConfigInfo() : mutinfo() + { + // DEFAULTS + covrate = 0.4; // cross-over rate + mutrate = 0.6; // mutation rate usually is 1.0 for real-valued + SP = 1.5; + tolerance = 0.0; + recombination_ratio = 0.60; //Real Valued crossover ratio, can't be 0.5 because 0.5 will generate two same offsprings after Xover + + elitpop = 1; + tntsize = 2; // k-tournament size k=2/4, higher value higher pressure + precision = 10; + + Objective = nullptr; + Selection = TNT; // RWS, TNT, RNK, RSP, TRS + CrossOver = RealValuedSimpleArithmeticRecombination; // + //Mutation = SPM; // derived from by mutinfo._type + Adaptation = nullptr; + Constraint = nullptr; + FixedValue = nullptr; + StopCondition = nullptr; + + nbgen = 125; //The num of gens for EA, 131+19, 125+25, 122+28 + popsize = 10; + output = false; + } + + MutationInfo mutinfo; + + double covrate; + double mutrate; + double SP; + double tolerance; + double recombination_ratio; + + int elitpop; + //int matsize; // set to popsize when ga is constructed, maybe change by ga.matsize = ... 
after constructor and before ga.run() + int tntsize; + int precision; + + std::vector (*Objective)(const std::vector&); + void (*Selection)(Population&); + void (*CrossOver)(const Population&, CHR&, CHR&); + void (*Mutation)(CHR&); + void (*Adaptation)(Population&) = nullptr; + std::vector (*Constraint)(const std::vector&); + void (*FixedValue)(Population&, int k); + bool (*StopCondition)(galgo::GeneticAlgorithm&); + + std::vector force_value_flag; + std::vector force_value; + + int nbgen; + int popsize; + bool output; +}; + + +/*-------------------------------------------------------------------------------------------------*/ + +template +class GeneticAlgorithm +{ + template friend class Population; + template friend class Chromosome; + template using FuncKT = std::vector(*)(const std::vector&); + +protected: + Population pop; // population of chromosomes + std::vector> param; // parameter(s) + std::vector lowerBound; // parameter(s) lower bound + std::vector upperBound; // parameter(s) upper bound + std::vector initialSet; // initial set of parameter(s) + std::vector idx; // indexes for chromosome breakdown + +public: + // objective function pointer + FuncKT Objective; + + // selection method initialized to roulette wheel selection + void (*Selection)(Population&) = TNT; + + // cross-over method initialized to 1-point cross-over + void(*CrossOver)(const Population&, CHR&, CHR&) = RealValuedSimpleArithmeticRecombination; + + // mutation method initialized to single-point mutation + void (*Mutation)(CHR&, size_t current_iteration, double gaussian_step_size) = GAM_UncorrelatedNStepSizeBoundary; + + // adaptation to constraint(s) method + void (*Adaptation)(Population&) = nullptr; + + // constraint(s) + std::vector (*Constraint)(const std::vector&) = nullptr; + + MutationInfo mutinfo; + + double covrate = 0.4; // cross-over rate + double mutrate = 0.6; // mutation rate + double SP = 1.5; // selective pressure for RSP selection method + double tolerance = 0.0; // terminal condition (inactive if equal to zero) + double recombination_ratio = 0.60; // Real Valued crossover ratio number = popsize * recombination_ratio + + int elitpop = 1; // elit population size + int matsize; // mating pool size, set to popsize by default + int tntsize = 2; // k-tournament size k=2/4, higher value higher pressure + int precision = 10; // precision for outputting results + bool output; // control if results must be outputted + + // Prototype to set fixed value of parameters while evolving + void (*FixedValue)(Population&, int k) = nullptr; + std::vector force_value_flag; + std::vector force_value; + + bool (*StopCondition)(GeneticAlgorithm&) = nullptr; + + // constructor + template GeneticAlgorithm(FuncKT objective, int popsize, int nbgen, bool output, MutationInfo mutinfo, const Parameter&...args); + template GeneticAlgorithm(const ConfigInfo& config, const Parameter&...args); + + // initialize populations to genetic algorithm, and return populations +// void init_run(std::vector< Eigen::VectorXd > samples); + + // run genetic algorithm + void run(std::string learner_algorithm, + size_t current_iteration, + double gaussian_step_size, + size_t switch_num, + std::vector samples, + std::vector observations); + + // return the last sample + Eigen::VectorXd get_sample(int i); + + // return best chromosome + const CHR& result() const; + + // print results for each new generation + void print(bool force = false) const; + + Population& get_pop() { return pop; } + +protected: + void setMutation(const MutationInfo& mt) + 
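+ // NB: of the MutationType values declared above, only MutationGAM_UncorrelatedNStepSizeBoundary + // is dispatched below; the other branches are commented out, so Mutation keeps its default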
{ + mutinfo = mt; +// if (mt._type == MutationType::MutationSPM) { Mutation = SPM; } +// else if (mt._type == MutationType::MutationBDM) { Mutation = BDM; } +// else if (mt._type == MutationType::MutationUNM) { Mutation = UNM; } +// else if (mt._type == MutationType::MutationGAM_UncorrelatedOneStepSizeFixed) { Mutation = GAM_UncorrelatedOneStepSizeFixed; } +// else if (mt._type == MutationType::MutationGAM_UncorrelatedOneStepSizeBoundary) { Mutation = GAM_UncorrelatedOneStepSizeBoundary; } +// else if (mt._type == MutationType::MutationGAM_UncorrelatedNStepSize) { Mutation = GAM_UncorrelatedNStepSize; } +// else if (mt._type == MutationType::MutationGAM_UncorrelatedNStepSizeBoundary) { Mutation = GAM_UncorrelatedNStepSizeBoundary; } +// else if (mt._type == MutationType::MutationGAM_sigma_adapting_per_generation) { Mutation = GAM_sigma_adapting_per_generation; } +// else if (mt._type == MutationType::MutationGAM_sigma_adapting_per_mutation) { Mutation = GAM_sigma_adapting_per_mutation; } +// else Mutation = SPM; + if (mt._type == MutationType::MutationGAM_UncorrelatedNStepSizeBoundary) { Mutation = GAM_UncorrelatedNStepSizeBoundary; } + + } + + GeneticAlgorithm(const ConfigInfo& config); // No parameters set + + int nbbit; // total number of bits per chromosome + int nbgen; // number of generations + int nogen = 0; // numero of generation + int nbparam; // number of parameters to be estimated + int popsize; // population size + + // end of recursion for initializing parameter(s) data + template + typename std::enable_if::type init(const TUP&); + + // recursion for initializing parameter(s) data + template + typename std::enable_if::type init(const TUP&); + + // check inputs validity + void check() const ; + + void init_from_config(const ConfigInfo& config); +}; + +/*-------------------------------------------------------------------------------------------------*/ +//template +//class GeneticAlgorithmN : public GeneticAlgorithm +//{ +//public: +// // constructor +// GeneticAlgorithmN(const ConfigInfo& config, +// std::vector& _lowerBound, +// std::vector& _upperBound, +// std::vector& _initialSet); +//}; + +/*-------------------------------------------------------------------------------------------------*/ +template +void GeneticAlgorithm::init_from_config(const ConfigInfo& config) +{ + setMutation(config.mutinfo); // Mutation is set here + + Objective = config.Objective; + Selection = config.Selection; + CrossOver = config.CrossOver; + Adaptation = config.Adaptation; + Constraint = config.Constraint; + FixedValue = config.FixedValue; + StopCondition = config.StopCondition; + + covrate = config.covrate; + mutrate = config.mutrate; + SP = config.SP; + tolerance = config.tolerance; + recombination_ratio = config.recombination_ratio; + + elitpop = config.elitpop; + tntsize = config.tntsize; + precision = config.precision; + + force_value_flag = config.force_value_flag; + force_value = config.force_value; + + nbgen = config.nbgen; + popsize = config.popsize; + matsize = popsize; // matsize default to popsize + output = config.output; + + nogen = 0; +} + +template template +GeneticAlgorithm::GeneticAlgorithm(const ConfigInfo& config, const Parameter&...args) +{ + init_from_config(config); + + nbbit = sum(N...); + nbparam = sizeof...(N); + TUP tp(args...); + init(tp); +} + +template +GeneticAlgorithm::GeneticAlgorithm(const ConfigInfo& config) +{ + init_from_config(config); +} + +// constructor +template template +GeneticAlgorithm::GeneticAlgorithm(FuncKT objective, int popsize, int nbgen, bool 
output, MutationInfo<T> mutinfo, const Parameter<T,N>&...args) +{ + setMutation(mutinfo); + Objective = objective; + + // getting total number of bits per chromosome + nbbit = sum(N...); + this->nbgen = nbgen; // constructor parameters shadow the members, so qualify with this-> + + // getting number of parameters in the pack + nbparam = sizeof...(N); + + this->popsize = popsize; + matsize = popsize; + this->output = output; + + // unpacking parameter pack in tuple + TUP<T,N...> tp(args...); + + // initializing parameter(s) data + init(tp); +} + +//// constructor +//template +//GeneticAlgorithmN::GeneticAlgorithmN(const ConfigInfo& _config, std::vector& _lowerBound, std::vector& _upperBound, std::vector& _initialSet) +// : GeneticAlgorithm(_config) +//{ +// for (int i = 0; i<_lowerBound.size(); i++) +// { +// std::vector w; +// w.push_back(_lowerBound[i]); w.push_back(_upperBound[i]); w.push_back(_initialSet[i]); +// Parameter p(w); +// param.emplace_back(new decltype(p)(p)); +// +// if (i == 0) idx.push_back(0); +// else idx.push_back(idx[i - 1] + PARAM_NBIT); +// } +// lowerBound = _lowerBound; +// upperBound = _upperBound; +// initialSet = _initialSet; +// +// nbbit = (int)_lowerBound.size()*PARAM_NBIT; +// nbparam = (int)_lowerBound.size(); +//}; + +/*-------------------------------------------------------------------------------------------------*/ + +// end of recursion for initializing parameter(s) data +template <typename T> template <int I, int... N> +inline typename std::enable_if<I == sizeof...(N), void>::type +GeneticAlgorithm<T>::init(const TUP<T,N...>& tp) {} + +// recursion for initializing parameter(s) data +template <typename T> template <int I, int... N> +inline typename std::enable_if<I < sizeof...(N), void>::type +GeneticAlgorithm<T>::init(const TUP<T,N...>& tp) +{ + // getting Ith parameter in tuple + auto par = std::get<I>(tp); + + // getting Ith parameter initial data + const std::vector<T>& data = par.getData(); + + // copying parameter data + param.emplace_back(new decltype(par)(par)); + + lowerBound.push_back(data[0]); + upperBound.push_back(data[1]); + + // if parameter has initial value + if (data.size() > 2) { + initialSet.push_back(data[2]); + } + // setting indexes for chromosome breakdown + if (I == 0) { + idx.push_back(0); + } else { + idx.push_back(idx[I - 1] + par.size()); + } + // recursing + init<I + 1>(tp); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// check inputs validity +template <typename T> +void GeneticAlgorithm<T>::check() const +{ + if (!initialSet.empty()) { + for (int i = 0; i < nbparam; ++i) { + if (initialSet[i] < lowerBound[i] || initialSet[i] > upperBound[i]) { + throw std::invalid_argument("Error: in class galgo::Parameter, initial parameter value cannot be outside the parameter boundaries, please choose a value between its lower and upper bounds."); + } + } + if (initialSet.size() != (unsigned)nbparam) { + throw std::invalid_argument("Error: in class galgo::GeneticAlgorithm, initial set of parameters does not have the same dimension as the number of parameters, please adjust."); + } + } + if (SP < 1.0 || SP > 2.0) { + throw std::invalid_argument("Error: in class galgo::GeneticAlgorithm, selective pressure (SP) cannot be outside [1.0,2.0], please choose a real value within this interval."); + } + if (elitpop > popsize || elitpop < 0) { + throw std::invalid_argument("Error: in class galgo::GeneticAlgorithm, elite population (elitpop) cannot be outside [0,popsize], please choose an integral value within this interval."); + } + if (covrate < 0.0 || covrate > 1.0) { + throw std::invalid_argument("Error: in class galgo::GeneticAlgorithm, cross-over rate (covrate) cannot be outside [0.0,1.0], please choose a real value within this
+
+/*-------------------------------------------------------------------------------------------------*/
+// initialize populations to genetic algorithm
+//template <typename T>
+//void GeneticAlgorithm<T>::init_run(std::vector< Eigen::VectorXd > samples)
+//{
+//    std::cout << "Running init_run() for initial random sampling in EA ...\n";
+//    double bestResult;
+//    double prevBestResult;
+//    // checking inputs validity
+//    check();
+//
+//    // setting adaptation method to default if needed
+//    if (Constraint != nullptr && Adaptation == nullptr) {
+//        Adaptation = DAC;
+//    }
+//
+//    // initializing population
+//    pop = Population<T>(*this);
+//
+//    // creating population
+//    pop.creation(samples);
+//
+//    // initializing best result and previous best result
+//    ////TODO different bestResult for Revolve
+//    bestResult = pop(0)->getTotal();
+//    prevBestResult = bestResult;
+//
+//    // outputting results
+//    if (output) print();
+//}
+
+template <typename T>
+Eigen::VectorXd GeneticAlgorithm<T>::get_sample(int i)
+{
+    // return the i_th individual
+    std::vector<T> individual = pop.get_individual(i);
+    // transfer std::vector to Eigen::VectorXd
+    Eigen::VectorXd sample = Eigen::Map<Eigen::VectorXd>(individual.data(), individual.size());
+    return sample; // return Eigen::VectorXd
+}
+
+/*-------------------------------------------------------------------------------------------------*/
+// run genetic algorithm
+template <typename T>
+void GeneticAlgorithm<T>::run(std::string learner_algorithm,
+                              size_t current_iteration,
+                              double gaussian_step_size,
+                              size_t switch_num,
+                              std::vector<Eigen::VectorXd> samples,
+                              std::vector<Eigen::VectorXd> observations)
+{
+//    std::cout << "Running run() ....\n";
+    // checking inputs validity
+    check();
+
+    // setting adaptation method to default if needed
+    if (Constraint != nullptr && Adaptation == nullptr) {
+        Adaptation = DAC;
+    }
+
+    // initializing population
+    pop = Population<T>(*this);
+
+    // creating population
+    pop.creation(learner_algorithm, current_iteration, switch_num, samples, observations);
+
+    // outputting results
+    if (output) print();
+
+    // starting population evolution
+    pop.evolution(current_iteration, gaussian_step_size);
+}
+
+/*-------------------------------------------------------------------------------------------------*/
+
+// return best chromosome
+template <typename T>
+inline const CHR<T>& GeneticAlgorithm<T>::result() const
+{
+    return pop(0);
+}
+
+/*-------------------------------------------------------------------------------------------------*/
+
+// print results for each new generation
+template <typename T>
+void GeneticAlgorithm<T>::print(bool force) const
+{
+    // getting best parameter(s) from best chromosome
+    std::vector< T > bestParam = pop(0)->getParam();
+    std::vector< double > bestResult = pop(0)->getResult();
+
+    std::cout << " Generation = " << std::setw(std::to_string(nbgen).size())
+              << nogen << " |";
+    for (int i = 0; i < nbparam; ++i)
+    {
+        std::cout << " X";
+        if (nbparam > 1)
+        {
+            std::cout << std::to_string(i + 1);
+        }
+        std::cout << " = " << std::setw(2 + precision) << std::fixed
+                  << std::setprecision(precision) << bestParam[i] << " |";
+    }
+    for (unsigned i = 0; i < bestResult.size(); ++i)
+    {
+        std::cout << " F";
+        if (bestResult.size() > 1)
+        {
+            std::cout << std::to_string(i + 1);
+        }
+        std::cout << "(x) = " << std::setw(12) << std::fixed
+                  << std::setprecision(precision) << bestResult[i];
+
+        if (nogen != 0)
+        {
+            // save the best value
+            std::ofstream value;
+            value.open("../value.txt", std::ios::app);
+            value << std::fixed << bestResult[i] << std::endl;
+        }
+
+        if (i < bestResult.size() - 1)
+        {
+            std::cout <<
" |"; + } + else + { + std::cout << "\n"; + } + } + std::cout << std::endl; +} + +//================================================================================================= + +} + +#endif diff --git a/thirdparty/galgo/galgo/Parameter.hpp b/thirdparty/galgo/galgo/Parameter.hpp new file mode 100755 index 0000000000..41b2a9070d --- /dev/null +++ b/thirdparty/galgo/galgo/Parameter.hpp @@ -0,0 +1,126 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef PARAMETER_H +#define PARAMETER_H + +namespace galgo { + +// end of recursion for computing the sum of a parameter pack of integral numbers +int sum(int first) +{ + return first; +} + +// recursion for computing the sum of a parameter pack of integral numbers +template +int sum(int first, Args...args) +{ + return first + sum(args...); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// abstract base class for Parameter objects +template +class BaseParameter +{ +public: + virtual ~BaseParameter() {} + virtual std::string encode() const = 0; + virtual std::string encode(T z) const = 0; + virtual T decode(const std::string& y) const = 0; + virtual int size() const = 0; + virtual const std::vector& getData() const = 0; +}; + +/*-------------------------------------------------------------------------------------------------*/ + +template +class Parameter : public BaseParameter +{ + template + friend class Chromosome; + +private: + std::vector data; // contains lower bound, upper bound and initial value (optional) + +public: + // default constructor + Parameter() + { + } + + // constructor + Parameter(const std::vector& data) + { + if (data.size() < 2) { + throw std::invalid_argument("Error: in class galgo::Parameter, argument must contain at least 2 elements of type T, the lower bound and the upper bound, please adjust."); + } + if (data[0] >= data[1]) { + throw std::invalid_argument("Error: in class galgo::Parameter, first argument (lower bound) cannot be equal or greater than second argument (upper bound), please amend."); + } + this->data = data; + } + // return encoded parameter size in number of bits + int size() const override { + return N; + } + // return parameter initial data + const std::vector& getData() const override { + return data; + } + + +private: + // encoding random unsigned integer + std::string encode() const override + { + std::string str = GetBinary(galgo::Randomize::generate()); + return str.substr(str.size() - N, N); + } + + // encoding known unsigned integer + std::string encode(T z) const override + { + if (std::is_integral::value) + { + uint64_t value = (uint64_t)(z - data[0]); + std::string str = GetBinary(value); + return str.substr(str.size() - N, N); + } + else + { + uint64_t value = (uint64_t)(Randomize::MAXVAL * (z - data[0]) / (data[1] - data[0])); + std::string str = GetBinary(value); + return str.substr(str.size() - N, N); + } + } + + // decoding string + T decode(const std::string& str) const override + { + if (std::is_integral::value) + { + int64_t d0 = (int64_t)data[0]; + int64_t r = d0 + GetValue(str); + int64_t v = std::min(std::max(r, static_cast(data[0])), static_cast(data[1])); + return (T)v; + } + else + { 
+ // decoding string to real value + // Randomize::MAXVAL=> 0,1,3,7,15,31,... + return (T)(data[0] + (GetValue(str) / static_cast(Randomize::MAXVAL)) * (data[1] - data[0])); + } + } +}; + +//================================================================================================= + +} + +#endif diff --git a/thirdparty/galgo/galgo/Population.hpp b/thirdparty/galgo/galgo/Population.hpp new file mode 100755 index 0000000000..32ef972d4c --- /dev/null +++ b/thirdparty/galgo/galgo/Population.hpp @@ -0,0 +1,604 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef POPULATION_HPP +#define POPULATION_HPP + +#include +#include +//#include "opencv2/highgui/highgui.hpp" +//#include "opencv2/core/core.hpp" +#include +#include + +namespace galgo +{ + + template class Population + { + public: + // nullary constructor + Population() + { + } + + // constructor + Population(const GeneticAlgorithm &ga); + + // create a population of chromosomes + void creation(std::string learner_algorithm, + size_t current_iteration, + size_t switch_num, + std::vector< Eigen::VectorXd> samples, + std::vector observations); + + // evolve population, get next generation + void evolution(size_t current_iteration, double gaussian_step_size); + + //get individuals + std::vector get_individual(int i); + + // access element in current population at position pos + const CHR &operator()(int pos) const; + + // access element in mating population at position pos + const CHR &operator[](int pos) const; + + // return iterator to current population beginning + typename std::vector< CHR < T>>:: + + iterator begin(); + + // return const iterator to current population beginning + typename std::vector< CHR < T>>:: + + const_iterator cbegin() const; + + // return iterator to current population ending + typename std::vector< CHR < T>>:: + + iterator end(); + + // return const iterator to current population ending + typename std::vector< CHR < T>>:: + + const_iterator cend() const; + + // select element at position pos in current population and copy it into mating population + void select(int pos); + + // set all fitness to positive values + void adjustFitness(); + + // compute fitness sum of current population + double getSumFitness() const; + + // get worst objective function total result from current population + double getWorstTotal() const; + + // return population size + int popsize() const; + + // return mating population size + int matsize() const; + + // return tournament size + int tntsize() const; + + // return numero of generation + int nogen() const; + + // return number of generations + int nbgen() const; + + // return selection pressure + double SP() const; + + const GeneticAlgorithm *ga_algo() { return ptr;} + + std::vector>& get_newpop() { return newpop;} + + std::vector>& get_curpop() { return curpop;} + + private: + std::vector< CHR < T>> curpop; // current population + std::vector< CHR < T>> matpop; // mating population + std::vector< CHR < T>> newpop; // new population + + const GeneticAlgorithm *ptr = nullptr; // pointer to genetic algorithm + int nbrcrov; // number of cross-over + int matidx; // mating population index + + // elitism => saving best chromosomes in new population + void 
elitism(); + + // create new population from recombination of the old one + void recombination(size_t current_iteration, double gaussian_step_size); + + // complete new population randomly + void completion(size_t current_iteration, double gaussian_step_size); + + // update population (adapting, sorting) + void updating(); + + bool compare_Xd(const Eigen::VectorXd& lhs, const Eigen::VectorXd& rhs) + { + return lhs(0) < rhs(0); + } + }; + + /*-------------------------------------------------------------------------------------------------*/ + + // constructor + template + Population< T >::Population(const GeneticAlgorithm &ga) + { + ptr = &ga; + nbrcrov = (int)ceil(ga.covrate * (ga.popsize - ga.elitpop)); + + // adjusting nbrcrov (must be an even number) + if (nbrcrov % 2 != 0) + { + nbrcrov += 1; + } + + // for convenience, we add elitpop to nbrcrov + nbrcrov += ga.elitpop; + + // allocating memory + curpop.resize(ga.popsize); + matpop.resize(ga.matsize); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // create a population of chromosomes + template void Population< T >::creation(std::string learner_algorithm, + size_t current_iteration, + size_t switch_num, + std::vector< Eigen::VectorXd> samples, + std::vector< Eigen::VectorXd> observations) + { + int start = 0; + + // initializing first chromosome + if (!ptr->initialSet.empty()) + { + curpop[0] = std::make_shared< Chromosome< T>>(*ptr); + curpop[0]->initialize(); //create new chromosome by encoding value to binary + curpop[0]->evaluate(); //encoding binary to value and calculating fitness + start++; + } + // getting the rest + #ifdef _OPENMP + #pragma omp parallel for num_threads(MAX_THREADS) + #endif + + if(learner_algorithm == "EA") + { + for (int i = start; i < ptr->popsize; ++i) + { + curpop[i] = std::make_shared< Chromosome< T>>(*ptr); + curpop[i]->create(i, samples[i]); + curpop[i]->evaluate0gen(observations[i]); //row 316 in Chromosome.hpp + } + } + +/////**** the interface to bayesian optimization ****/// + if(learner_algorithm == "BOEA") + { + std::cout << "switch_num: " << switch_num << std::endl; + // load the individuals from BO + if(current_iteration == switch_num) + { + int dimens = samples[0].size(); // curpop[i]->nbgene() + std::cout << "dimens: " << dimens << std::endl; + std::vector< Eigen::VectorXd > boea_observations = observations; + std::cout << " boea_observations.size(): " << boea_observations.size() << std::endl; + std::sort(boea_observations.begin(), boea_observations.end(), [](const Eigen::VectorXd& lhs, const Eigen::VectorXd& rhs) {return lhs(0) > rhs(0);}); + std::cout << "boea_observations: " << std::endl; + for (int i = 0; i < boea_observations.size(); i++) + { + std::cout << boea_observations[i] << " "; + } + std::cout << std::endl; + + std::vector< Eigen::VectorXd > boea_samples(ptr->popsize); + for (int i = 0; i < ptr->popsize; i++) + { + for (int j = 0; j < boea_observations.size(); j++) + { + if (boea_observations[i] == observations[j]) + { + std::cout << "i: " << i << " " << boea_observations[i] << " " + << observations[j] << std::endl; + std::cout << "samples[" << j << "] " << samples[j] << std::endl; + boea_samples[i] = samples[j]; + std::cout << "boea_samples[" << i << "]: " << boea_samples[i] << std::endl; + } + } + } + + for (int i = start; i < ptr->popsize; ++i) + { + curpop[i] = std::make_shared< Chromosome< T>>(*ptr); + curpop[i]->create(i, boea_samples[i]); + curpop[i]->evaluate0gen(boea_observations[i]); //row 316 in 
Chromosome.hpp + } + } + else + { + for (int i = start; i < ptr->popsize; ++i) + { + curpop[i] = std::make_shared< Chromosome< T>>(*ptr); + curpop[i]->create(i, samples[i]); + curpop[i]->evaluate0gen(observations[i]); //row 316 in Chromosome.hpp + } + } + } + + // updating population: sorting chromosomes from best to worst fitness + this->updating(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // population evolution (selection, recombination, completion, mutation), get next generation + template void Population< T >::evolution(size_t current_iteration, double gaussian_step_size) + { + // initializing mating population index + matidx = 0; + + // selecting mating population + // curpop[] -> matpop[] + ptr->Selection(*this); + + // applying elitism if required + // curpop[] -> newpop[0...elitpop-1] + this->elitism(); + + // crossing-over mating population + // matpop[] -> newpop[elitpop...nbrcrov-1] // crossover + mutation + this->recombination(current_iteration, gaussian_step_size); + + // completing new population // only mutation + // matpop[] -> newpop[nbrcrov...popsize] + this->completion(current_iteration, gaussian_step_size); + + // moving new population into current population for next generation + curpop = std::move(newpop); +// // galgo::CHR< T > &chr +// std::cout << "nbgene: " << curpop[0]->nbgene() << std::endl; +// double individual = curpop[1]->get_value(0); +// std::cout << "curpop.begin(): " << individual << std::endl; + + // updating population, sorting chromosomes from best to worst fitnes +// this->updating(); + +// std::cout << "curpop.begin(): " << curpop[1]->get_value(0) << std::endl; + } + + template + std::vector Population< T >::get_individual(int i) + { + int nbparams = curpop[i]->nbgene(); + std::vector individual(nbparams); + //get the parameters of i_th individual + for(int j = 0; j < nbparams; j++) + { + individual[j] = curpop[i]->get_value(j); + std::cout << individual[j] << " "; + } + std::cout << std::endl; + return individual; + } + /*-------------------------------------------------------------------------------------------------*/ + + // elitism => saving best chromosomes in new population, making a copy of each elit chromosome + template void Population< T >::elitism() + { + // (re)allocating new population + newpop.resize(ptr->popsize); + + if (ptr->elitpop > 0) + { + // copying elit chromosomes into new population + std::transform(curpop.cbegin(), + curpop.cend(), + newpop.begin(), + [](const CHR< T > &chr) -> CHR< T > + { return std::make_shared< Chromosome< T>>(*chr); }); + + for (size_t i = 0; i < curpop.size(); i++) + { + transmit_sigma< T >(*curpop[i], *newpop[i]); + } + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // create new population from recombination of the old one + template void Population< T >::recombination(size_t current_iteration, double gaussian_step_size) + { + // creating a new population by cross-over +#ifdef _OPENMP +#pragma omp parallel for num_threads(MAX_THREADS) +#endif + + for (int i = ptr->elitpop; i < nbrcrov; i = i + 2) + { + // initializing 2 new chromosome + newpop[i] = std::make_shared< Chromosome< T>>(*ptr); + newpop[i + 1] = std::make_shared< Chromosome< T>>(*ptr); + + // crossing-over mating population to create 2 new chromosomes + ptr->CrossOver(*this, newpop[i], newpop[i + 1]); + + // mutating new chromosomes + ptr->Mutation(newpop[i], current_iteration, gaussian_step_size); 
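+            // the second offspring from the same cross-over is mutated independently,
+            // with the same iteration-dependent gaussian step size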
+ ptr->Mutation(newpop[i + 1], current_iteration, gaussian_step_size); + + if (ptr->FixedValue != nullptr) + { + ptr->FixedValue(*this, i); + ptr->FixedValue(*this, i + 1); + } + + // evaluating new chromosomes + //std::cout << "Nu_evaluation_re: " << i; +// newpop[i]->evaluate(); + //std::cout << "Nu_evaluation_re- " << (i + 1); +// newpop[i + 1]->evaluate(); + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // complete new population + template void Population< T >::completion(size_t current_iteration, double gaussian_step_size) + { +#ifdef _OPENMP +#pragma omp parallel for num_threads(MAX_THREADS) +#endif + for (int i = nbrcrov; i < ptr->popsize; ++i) + { + // selecting chromosome randomly from mating population + int pos = uniform< int >(0, ptr->matsize); + newpop[i] = std::make_shared< Chromosome< T>>(*matpop[pos]); + transmit_sigma< T >(*matpop[pos], *newpop[i]); + + // mutating chromosome + ptr->Mutation(newpop[i], current_iteration, gaussian_step_size); + + if (ptr->FixedValue != nullptr) + { + ptr->FixedValue(*this, i); + } + //std::cout << "Nu_evaluation_com: " << i; + // evaluating chromosome +// newpop[i]->evaluate(); + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // update population (adapting, sorting) + template void Population< T >::updating() + { +// std::cout << "update population: sorting chromosomes from best to worst fitness" << std::endl; + // adapting population to constraints + if (ptr->Constraint != nullptr) + { + ptr->Adaptation(*this); + } + // sorting chromosomes from best to worst fitness + std::sort(curpop.begin(), curpop.end(), [](const CHR& chr1, const CHR& chr2)->bool{return chr1->fitness > chr2->fitness;}); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // access element in current population at position pos + template + const CHR &Population< T >::operator()(int pos) const + { +#ifndef NDEBUG + if (pos > ptr->popsize - 1) + { + throw std::invalid_argument( + "Error: in galgo::Population::operator()(int), exceeding current population memory."); + } +#endif + + return curpop[pos]; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // access element in mating population at position pos + template + const CHR &Population< T >::operator[](int pos) const + { +#ifndef NDEBUG + if (pos > ptr->matsize - 1) + { + throw std::invalid_argument( + "Error: in galgo::Population::operator[](int), exceeding mating population memory."); + } +#endif + + return matpop[pos]; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return iterator to current population beginning + template inline typename std::vector< CHR < T>> + + ::iterator Population< T >::begin() + { + return curpop.begin(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return const iterator to current population beginning + template inline typename std::vector< CHR < T>> + + ::const_iterator Population< T >::cbegin() const + { + return curpop.cbegin(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return iterator to current population ending + template inline typename std::vector< CHR < T>> + + ::iterator Population< T >::end() + { + return 
curpop.end(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return const iterator to current population ending + template inline typename std::vector< CHR < T>> + + ::const_iterator Population< T >::cend() const + { + return curpop.cend(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // select element at position pos in current population and copy it into mating population + template inline void Population< T >::select(int pos) + { +#ifndef NDEBUG + if (pos > ptr->popsize - 1) + { + throw std::invalid_argument( + "Error: in galgo::Population::select(int), exceeding current population memory."); + } + if (matidx == ptr->matsize) + { + throw std::invalid_argument( + "Error: in galgo::Population::select(int), exceeding mating population memory."); + } +#endif + + matpop[matidx] = curpop[pos]; + matidx++; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // set all fitness to positive values (used in RWS and SUS selection methods) + template void Population< T >::adjustFitness() + { + // getting worst population fitness + double worstFitness = curpop.back()->fitness; + + if (worstFitness < 0) + { + // getting best fitness + double bestFitness = curpop.front()->fitness; + // case where all fitness are equal and negative + if (worstFitness == bestFitness) + { + std::for_each(curpop.begin(), curpop.end(), [](CHR< T > &chr) -> void + { chr->fitness *= -1; }); + } + else + { + std::for_each(curpop.begin(), + curpop.end(), + [worstFitness](CHR< T > &chr) -> void + { chr->fitness -= worstFitness; }); + } + } + } + + /*-------------------------------------------------------------------------------------------------*/ + + // compute population fitness sum (used in TRS, RWS and SUS selection methods) + template inline double Population< T >::getSumFitness() const + { + return std::accumulate(curpop.cbegin(), curpop.cend(), 0.0, []( + double sum, + const CHR< T > &chr) -> double + { return sum + chr->fitness; }); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // get worst objective function total result from current population (used in constraint(s) adaptation) + template inline double Population< T >::getWorstTotal() const + { + auto it = std::min_element(curpop.begin(), curpop.end(), []( + const CHR< T > &chr1, + const CHR< T > &chr2) -> bool + { return chr1->getTotal() < chr2->getTotal(); }); + return (*it)->getTotal(); + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return population size + template inline int Population< T >::popsize() const + { + return ptr->popsize; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return mating population size + template inline int Population< T >::matsize() const + { + return ptr->matsize; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return tournament size + template inline int Population< T >::tntsize() const + { + return ptr->tntsize; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return numero of generation + template inline int Population< T >::nogen() const + { + return ptr->nogen; + } + + + 
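+    // Illustrative sketch (not part of this file): since updating() sorts the
+    // population best-to-worst, ranging over it with the iterators above visits
+    // chromosomes in fitness order, e.g.:
+    //
+    //   for (auto it = pop.begin(); it != pop.end(); ++it)
+    //       std::cout << (*it)->fitness << "\n";
+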
/*-------------------------------------------------------------------------------------------------*/ + + // return number of generations + template inline int Population< T >::nbgen() const + { + return ptr->nbgen; + } + + /*-------------------------------------------------------------------------------------------------*/ + + // return selection pressure + template inline double Population< T >::SP() const + { + return ptr->SP; + } + + //================================================================================================= + +} + +#endif + + diff --git a/thirdparty/galgo/galgo/Randomize.hpp b/thirdparty/galgo/galgo/Randomize.hpp new file mode 100755 index 0000000000..2a5e96bd91 --- /dev/null +++ b/thirdparty/galgo/galgo/Randomize.hpp @@ -0,0 +1,75 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef RANDOMIZE_H +#define RANDOMIZE_H + +namespace galgo { + +// template metaprogramming for getting maximum unsigned integral value from N bits +template +struct MAXVALUE +{ + enum : uint64_t{ value = 2 * MAXVALUE::value }; +}; + +// template specialization for initial case N = 0 +template <> +struct MAXVALUE<0> +{ + enum { value = 1 }; +}; + +/*-------------------------------------------------------------------------------------------------*/ + +// Mersenne Twister 19937 pseudo-random number generator +std::random_device rand_dev; +std::mt19937_64 rng(rand_dev()); + +// generate uniform random probability in range [0,1) +std::uniform_real_distribution<> proba(0, 1); + +/*-------------------------------------------------------------------------------------------------*/ + +// generate a uniform random number within the interval [min,max) +template +inline T uniform(T min, T max) +{ + #ifndef NDEBUG + if (min >= max) { + throw std::invalid_argument("Error: in galgo::uniform(T, T), first argument must be < to second argument."); + // std::cout << "Error: in galgo::uniform(T, T), first argument must be < to second argument." 
<< std::endl; + } + #endif + + return (T) ( min + proba(rng) * (max - min)); +} + +/*-------------------------------------------------------------------------------------------------*/ + +// static class for generating random unsigned integral numbers +template +class Randomize +{ + static_assert(N > 0 && N <= 64, "in class galgo::Randomize, template parameter N cannot be ouside interval [1,64], please choose an integer within this interval."); + +public: + // computation only done once for each different N + static constexpr uint64_t MAXVAL = MAXVALUE::value - 1; + + // generating random unsigned long long integer on [0,MAXVAL] + static uint64_t generate() { + // class constructor only called once for each different N + static std::uniform_int_distribution udistrib(0,MAXVAL); + return udistrib(rng); + } +}; + +//================================================================================================= + +} + +#endif diff --git a/thirdparty/galgo/galgo/TestFunction.hpp b/thirdparty/galgo/galgo/TestFunction.hpp new file mode 100755 index 0000000000..3af6632b55 --- /dev/null +++ b/thirdparty/galgo/galgo/TestFunction.hpp @@ -0,0 +1,358 @@ +//================================================================================================= +// Copyright (C) 2018 Alain Lanthier - All Rights Reserved +// License: MIT License See LICENSE.md for the full license. +// Original code 2017 Olivier Mallet (MIT License) +//================================================================================================= + +#ifndef TESTFUNCTION_HPP +#define TESTFUNCTION_HPP + +/********** 1# ACKLEY function N Dimensions **************/ +template +class AckleyObjective +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 5; + auto xx = x; + // transfer interval from [0, 1] to [-32.768, 32.768] + for (int i = 0; i < dim_in; i++) + { + xx[i] = 65.536 * x[i] - 32.768; + } + const double a = 20.; + const double b = 0.2; + const double c = 2 * M_PI; + double sum1 = 0.; + double sum2 = 0.; + for (size_t i = 0; i < dim_in; i++) + { + sum1 = sum1 + xx[i] * xx[i]; + sum2 = sum2 + std::cos(c * xx[i]); + } + double term1 = -a * std::exp(-b * std::sqrt(sum1 / dim_in)); + double term2 = -std::exp(sum2 / dim_in); + double obj = term1 + term2 + a + std::exp(1); + return {-obj}; //max = 0, at (0,...,0) + } +}; + +/********** 2# SCHWEFEL function N Dimensions **************/ +template +class SchwefelObjective //todo not accurate results +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 20; + auto xx = x; + //transfer [0, 1] to [-500, 500] [250, 500] [100,500] [200,500] + for (int i = 0; i < dim_in; i++) + { + xx[i] = 300. 
* x[i] + 200.; + } + double sum = 0.; + for (size_t i = 0; i < dim_in; i++) + { + sum = sum + xx[i] * sin(sqrt(abs(xx[i]))); + } + double obj = 418.9829 * dim_in - sum;//X1 = 0.9209721088 | X2 = 0.9209100604 | F(x) = -0.0004374512 + return {-obj}; //maximum = 0 with (420.9687, ..., 420.9687) + } +}; + +/********** 3# Ellipsoid function N Dimensions **************/ +template +class EllipsoidObjective +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 5; + double inner = 0., outer = 0.; + for (size_t i = 0; i < dim_in; ++i) + { + for(size_t j = 0; j < i; j++) + { + inner = inner + std::pow((131.072 * x[j] - 65.536), 2); //(-65.536, 65.536) + } + outer = outer + inner; + } + return {-outer}; //maximum = 0 at (0, ..., 0) + } +}; + +/********** 4# Sphere function N Dimensions **************/ +template +class SphereObjective +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 5; + double inner = 0.; + for (size_t i = 0; i < dim_in; ++i) + { + inner = inner + std::pow((10. * x[i] - 5.), 2); + } + return {-inner}; //maximum = 0 with (0, 0) + } +}; + +/********** 5# Rosenbrock function N Dimensions **************/ +template +class RosenbrockObjective //todo not good results +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 5; + auto xx = x; + // transfer interval from [0, 1] to [-5, 10] + for (int i = 0; i < dim_in; i++) + xx[i] = 15. * x[i] - 5.; + double sum = 0.; + double term = 0.; + double xnext = 0.; + for(size_t i = 0; i < (dim_in - 1); i++) + { + xnext = xx[i + 1]; + term = 100. * std::pow((xnext - xx[i] * xx[i]), 2.0) + std::pow((xx[i] - 1), 2.0); + sum = sum + term; + } + double obj = sum; + return {-obj}; //maximum = 0 with (1,...,1) + } +}; + +/********* 6# Michalewicz function N = 2/5/10 Dimensions **********/ +template +class MichalewiczObjective //todo not good results +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 5; + auto xx = x; + // transfer interval from [0, 1] to [0, pi] + for (int i = 0; i < dim_in; i++) + xx[i] = M_PI * x[i]; + double sum = 0.; + double term = 0.; + double m = 10.; + for(size_t i = 0; i < dim_in; i++) + { + term = std::sin(xx[i]) * std::pow(std::sin(i * xx[i] * xx[i]/M_PI), 2 * m); + sum = sum + term; + } + double obj = sum; + return {obj}; //max= -1.8013(2D) at (2.20,1.57)/-4.687658(5D)/-9.66015(10D) + } +}; + +/********** 7# StyblinskiTang function N Dimensions ****************/ +template +class StyblinskiTangObjective +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 20; + auto xx = x; + // transfer interval from [0, 1] to [-5, 5] [-4,4] + for (int i = 0; i < dim_in; i++) + xx[i] = 10. * x[i] - 5.; + double sum = 0.; + double term; + for(size_t i = 0; i < dim_in; i++) + { + term = std::pow(xx[i], 4.0) - 16 * xx[i] * xx[i] + 5 * xx[i]; + sum = sum + term; + } + double obj = sum/2.0; + return {-obj}; //max= 39.16599 * d, (5D:195.82995)at (-2.903534,...,-2.903534) + } +}; + +/********** Rastrigin function N Dimensions ****************/ +template +class RastriginObjective +{ + public: + static std::vector Objective(const std::vector& x) + { + size_t dim_in = 20; + auto xx = x; + //transfer interval from [0, 1] to [-3, 3] [-2, 2] [-5, 5] + for (int i = 0; i < dim_in; i++) + xx[i] = 4. * x[i] - 2.; + double f = 10. * dim_in; + for (size_t i = 0; i < dim_in; ++i) + f += xx[i] * xx[i] - 10. 
* std::cos(2 * M_PI * xx[i]);
+        return {-f}; //maximum = 0 with (0, 0, 0, 0);
+    }
+};
+
+/********** Griewank function N Dimensions ****************/
+template <typename T>
+class GriewankObjective
+{
+    public:
+    static std::vector<double> Objective(const std::vector<T>& x)
+    {
+        size_t dim_in = 20;
+        auto xx = x;
+        //transfer interval from [0, 1] to [-10, 10] or [-5, 5] [-6,6]
+        for (int i = 0; i < dim_in; i++)
+            xx[i] = 10. * x[i] - 5.;
+        double sum = 0.0, f = 1.0;
+        for (size_t i = 0; i < dim_in; ++i)
+        {
+            sum += xx[i] * xx[i]/4000;
+            f = f * std::cos(xx[i]/std::sqrt(i+1));
+        }
+        double obj = sum - f + 1;
+        return {-obj}; //maximum = 0 with (0, 0, 0, 0);
+    }
+};
+
+
+/********** 8# Powell function N >= 4 Dimensions *************/
+template <typename T>
+class PowellObjective //todo not good results
+{
+    public:
+    static std::vector<double> Objective(const std::vector<T>& x)
+    {
+        size_t dim_in = 5;
+        auto xx = x;
+        // transfer interval from [0, 1] to [-4, 5]
+        for (int i = 0; i < dim_in; i++)
+            xx[i] = 9 * x[i] - 4;
+        double sum = 0.;
+        double term1, term2, term3, term4;
+        // Powell is defined with 1-based indices x_{4i-3}..x_{4i}; with the 0-based
+        // xx the i-th block reads xx[4i-4]..xx[4i-1], so i must start at 1
+        // (starting at 0, as before, indexed out of bounds via xx[4*i - 3])
+        for (size_t i = 1; i <= dim_in / 4; i++)
+        {
+            term1 = std::pow((xx[4 * i - 4] + 10 * xx[4 * i - 3]), 2.0);
+            term2 = 5 * std::pow((xx[4 * i - 2] - xx[4 * i - 1]), 2.0);
+            term3 = std::pow(xx[4 * i - 3] - 2 * xx[4 * i - 2], 4.0);
+            term4 = 10 * std::pow(xx[4 * i - 4] - xx[4 * i - 1], 4.0);
+            sum = sum + term1 + term2 + term3 + term4;
+            std::cout << std::endl << "i: " << i
+                      << " sum: " << sum << " term1: " << term1 << " term2: " << term2
+                      << " term3: " << term3 << " term4: " << term4 << std::endl;
+        }
+        double obj = sum;
+        return {-obj}; //max= 0 at (0,...,0)
+    }
+};
+
+//// constraints example:
+//template <typename T>
+//std::vector<double> MyConstraint(const std::vector<T>& x)
+//{
+//    double x0 = (double)x[0];
+//    double x1 = (double)x[1];
+//    return
+//    {
+//       //x[0]*x[1]+x[0]-x[1]+1.5,   // 1) x * y + x - y + 1.5 <= 0
+//       //10-x[0]*x[1]               // 2) 10 - x * y <= 0
+//       x0 - 2,                      // x0 <= 2
+//       x1 - 2                       // x1 <= 2
+//    };
+//}
+
+/* Rastrigin Function */
+template <typename T>
+double pso_rastrigin(std::vector< T > particle)
+{
+    double result(10. * static_cast<double>(particle.size())), A(10.), PI(3.14159);
+    for (auto dim : particle) {
+        result += pow(dim, 2.) - (A * cos(2. * PI * dim));
+    }
+    return (result);
+}
+
+template <typename T>
+class rastriginObjective
+{
+public:
+    static std::vector<double> Objective(const std::vector<T>& x)
+    {
+        std::vector<double> xd(x.size());
+        for (size_t i = 0; i < x.size(); i++) xd[i] = (double)x[i];
+
+        double obj = -pso_rastrigin(xd);
+        return { obj };
+    }
+};
+
+/*
+Griewank Function
+*/
+template <typename T>
+double pso_griewank(std::vector< T > particle)
+{
+    double sum(0.), product(1.);
+    for (int i = 0; i < particle.size(); i++) {
+        sum += pow(particle[i], 2.);
+        product *= cos(particle[i] / sqrt(i + 1));
+    }
+    return (1. + (sum / 4000.) - product);
+}
+
+//template <typename T>
+//class GriewankObjective
+//{
+//public:
+//    static std::vector<double> Objective(const std::vector<T>& x)
+//    {
+//        std::vector<double> xd(x.size());
+//        for (size_t i = 0; i < x.size(); i++) xd[i] = (double)x[i];
+//
+//        double obj = -pso_griewank(xd);
+//        return { obj };
+//    }
+//};
+
+/*
+Styblinski-Tang Function
+Min = (-2.903534,...,-2.903534)
+*/
+template <typename T>
+double pso_styb_tang(std::vector< T > particle)
+{
+    double result(0.);
+    for (auto dim : particle) {
+        result += pow(dim, 4.0) - (16. * pow(dim, 2.)) + (5. * dim);
+    }
+    return (result / 2.);
+}
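+
+// Illustrative sanity check (not part of this file): the galgo-style objectives
+// above take genes in [0, 1] and rescale internally; e.g. the 20-D Rastrigin
+// functor maps x = 0.5 to xx = 4*0.5 - 2 = 0, its optimum, so:
+//
+//   RastriginObjective<double>::Objective(std::vector<double>(20, 0.5));  // == {0}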
+
+
+template <typename T>
+class SumSameAsPrdObjective
+{
+public:
+    static std::vector<double> Objective(const std::vector<T>& x)
+    {
+        double x0 = (double)x[0];
+        double x1 = (double)x[1];
+
+        int ix = (int)x0;
+        int iy = (int)x1;
+        double sum = ix + iy;
+        double prd = ix * iy;
+        double diff = std::fabs(sum - prd);
+
+        double err = 1000 * diff * diff;
+        err += (100 * std::fabs(x0 - ix) * std::fabs(x0 - ix) + 100 * std::fabs(x1 - iy) * std::fabs(x1 - iy));
+
+        double obj = -(diff + err);
+        return { obj };
+    }
+};
+
+
+#endif
\ No newline at end of file
diff --git a/thirdparty/galgo/galgo/example.cpp b/thirdparty/galgo/galgo/example.cpp
new file mode 100755
index 0000000000..d372e547a3
--- /dev/null
+++ b/thirdparty/galgo/galgo/example.cpp
@@ -0,0 +1,143 @@
+//=================================================================================================
+// Copyright (C) 2018 Alain Lanthier - All Rights Reserved
+// License: MIT License    See LICENSE.md for the full license.
+// Original code 2017 Olivier Mallet (MIT License)
+//=================================================================================================
+
+#include "Galgo.hpp"
+
+//------------------------------------------------------------------
+// Uncomment #define TEST_ALL_TYPE to test compiling of all types
+// Uncomment #define TEST_BINAIRO to test GA for Binairos
+//------------------------------------------------------------------
+//#define TEST_ALL_TYPE
+//#define TEST_BINAIRO
+
+#include "TestFunction.hpp"
+
+#ifdef TEST_ALL_TYPE
+#include "TestTypes.hpp"
+#endif
+
+
+template <typename _TYPE>
+void set_my_config(galgo::ConfigInfo<_TYPE>& config)
+{
+    // override some defaults
+    config.mutinfo._sigma = 1.0;
+    config.mutinfo._sigma_lowest = 0.01;
+    config.mutinfo._ratio_boundary = 0.10;
+
+    config.covrate = 0.50;  // 0.0 if no cross-over
+    config.mutrate = 0.05;
+    config.recombination_ratio = 0.50;
+
+    config.elitpop = 5;
+    config.tntsize = 2;
+    config.Selection = TNT;
+    config.CrossOver = RealValuedSimpleArithmeticRecombination;
+    config.mutinfo._type = galgo::MutationType::MutationGAM_UncorrelatedNStepSizeBoundary;
+
+    config.popsize = 100;
+    config.nbgen = 400;
+    config.output = true;
+}
+
+int main()
+{
+#ifdef TEST_ALL_TYPE
+    TEST_all_types();
+
+    system("pause");
+    return 0;
+#endif
+
+#ifdef TEST_BINAIRO
+    system("color F0");
+    GA_Binairo::test_ga_binairo(4);  // 0=resolve one free cell(hard), 1=resolve 4 free cells(very hard), 2=resolve 7 free cells(diabolical), 3, 4=generate new grid
+
+    system("pause");
+    return 0;
+#endif
+
+    using _TYPE = float;   // supports float, double, char, int, long, ... for parameters
+    const int NBIT = 32;   // has to remain between 1 and 64
+
+    // CONFIG
+    galgo::ConfigInfo<_TYPE> config;  // a new instance of config gets the initial defaults
+    set_my_config<_TYPE>(config);     // override some defaults
+
+    {
+        {
+            std::cout << std::endl;
+            std::cout << "SumSameAsPrd function 2x2 = 2+2";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)1.5, (_TYPE)3, 3 });  // an initial value can be added inside the initializer list after the upper bound
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)1.5, (_TYPE)3, 3 });
+
+            config.Objective = SumSameAsPrdObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2);
+            ga.run();
+        }
+
+        {
+            std::cout << std::endl;
+            std::cout << "Rosenbrock function";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)-2.0, (_TYPE)2.0 });
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)-2.0, (_TYPE)2.0 });
+
+            config.Objective = RosenbrockObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2);
+            ga.run();
+        }
+
+        {
+            std::cout << std::endl;
+            std::cout << "Ackley function";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)-4.0, (_TYPE)5.0 });
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)-4.0, (_TYPE)5.0 });
+
+            config.Objective = AckleyObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2);
+            ga.run();
+        }
+
+        {
+            std::cout << std::endl;
+            std::cout << "Rastrigin function";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)-4.0, (_TYPE)5.0 });
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)-4.0, (_TYPE)5.0 });
+            galgo::Parameter<_TYPE, NBIT> par3({ (_TYPE)-4.0, (_TYPE)5.0 });
+
+            config.Objective = rastriginObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2, par3);
+            ga.run();
+        }
+
+        {
+            std::cout << std::endl;
+            std::cout << "StyblinskiTang function Min = (-2.903534,...,-2.903534)";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)-4.0, (_TYPE)4.0 });
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)-4.0, (_TYPE)4.0 });
+            galgo::Parameter<_TYPE, NBIT> par3({ (_TYPE)-4.0, (_TYPE)4.0 });
+
+            config.Objective = StyblinskiTangObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2, par3);
+            ga.run();
+        }
+
+        {
+            std::cout << std::endl;
+            std::cout << "Griewank function";
+            galgo::Parameter<_TYPE, NBIT> par1({ (_TYPE)-4.0, (_TYPE)5.0 });
+            galgo::Parameter<_TYPE, NBIT> par2({ (_TYPE)-4.0, (_TYPE)5.0 });
+            galgo::Parameter<_TYPE, NBIT> par3({ (_TYPE)-4.0, (_TYPE)5.0 });
+
+            config.Objective = GriewankObjective<_TYPE>::Objective;
+            galgo::GeneticAlgorithm<_TYPE> ga(config, par1, par2, par3);
+            ga.run();
+        }
+    }
+
+    system("pause");
+}
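+
+// Building the example standalone (illustrative; the exact compiler invocation is
+// an assumption, adjust to your toolchain, and OpenMP is optional):
+//
+//   g++ -std=c++11 -O2 example.cpp -o galgo_example && ./galgo_example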