diff --git a/.github/workflows/cpp.yml b/.github/workflows/cpp.yml
index 35fe9813329..7b8656497bf 100644
--- a/.github/workflows/cpp.yml
+++ b/.github/workflows/cpp.yml
@@ -60,12 +60,12 @@ jobs:
       - name: Run cppcheck
         run: |
           cd build
-          cppcheck --enable=all --inline-suppr --suppress=missingIncludeSystem -I../src/cc/flwr/include ../src/cc/flwr/src
+          cppcheck --enable=all -I../src/cc/flwr/include ../src/cc/flwr/src
       - name: End-to-end test
         run: |
           cd examples/quickstart-cpp
-          cmake -S . -B build
+          cmake -DUSE_LOCAL_FLWR=ON -S . -B build
           cmake --build build
           pip install ../..
           timeout 2m python server.py &
diff --git a/examples/quickstart-cpp/CMakeLists.txt b/examples/quickstart-cpp/CMakeLists.txt
index 552132b079c..b0aca2eaa2b 100644
--- a/examples/quickstart-cpp/CMakeLists.txt
+++ b/examples/quickstart-cpp/CMakeLists.txt
@@ -3,6 +3,7 @@ project(SimpleCppFlowerClient VERSION 0.10
         DESCRIPTION "Creates a Simple C++ Flower client that trains a linear model on synthetic data."
         LANGUAGES CXX)
 set(CMAKE_CXX_STANDARD 17)
+option(USE_LOCAL_FLWR "Use local Flower directory instead of fetching from GitHub" OFF)
 
 ######################
 ### Download gRPC
@@ -28,8 +29,24 @@ endif()
 
 ######################
 ### FLWR_LIB
+if(USE_LOCAL_FLWR)
+  set(FLWR_SOURCE_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/../..")
+else()
+  FetchContent_Declare(
+    flwr_repo
+    GIT_REPOSITORY https://github.com/adap/flower.git
+    GIT_TAG main
+  )
+
+  FetchContent_GetProperties(flwr_repo)
+  if(NOT flwr_repo_POPULATED)
+    FetchContent_Populate(flwr_repo)
+  endif()
+
+  set(FLWR_SOURCE_ROOT "${flwr_repo_SOURCE_DIR}")
+endif()
 
-set(FLWR_SDK_PATH "../../src/cc/flwr")
+set(FLWR_SDK_PATH "${FLWR_SOURCE_ROOT}/src/cc/flwr")
 
 file(GLOB FLWR_SRCS "${FLWR_SDK_PATH}/src/*.cc")
 file(GLOB FLWR_PROTO_SRCS "${FLWR_SDK_PATH}/include/flwr/proto/*.cc")
diff --git a/examples/quickstart-cpp/include/line_fit_model.h b/examples/quickstart-cpp/include/line_fit_model.h
index 8a905f364fc..1355e17b18a 100644
--- a/examples/quickstart-cpp/include/line_fit_model.h
+++ b/examples/quickstart-cpp/include/line_fit_model.h
@@ -7,11 +7,11 @@
 #include
 
-#include "synthetic_dataset.h"
 #include "linear_algebra_util.h"
+#include "synthetic_dataset.h"
 #include
 
 class LineFitModel {
- public:
+public:
   LineFitModel(int num_iterations, double learning_rate, int num_params);
 
   std::vector<double> predict(std::vector<std::vector<double>> X);
@@ -30,7 +30,7 @@ class LineFitModel {
 
   size_t get_model_size();
 
- private:
+private:
   int num_iterations;
   int batch_size;
   double learning_rate;
@@ -41,4 +41,4 @@ class LineFitModel {
   double compute_mse(std::vector<double> true_y, std::vector<double> pred);
 };
 
-#endif //FLOWER_CPP_LINE_FIT_MODEL_H
+#endif // FLOWER_CPP_LINE_FIT_MODEL_H
diff --git a/examples/quickstart-cpp/include/linear_algebra_util.h b/examples/quickstart-cpp/include/linear_algebra_util.h
index 9451f971cd0..3ced2eb9aa8 100644
--- a/examples/quickstart-cpp/include/linear_algebra_util.h
+++ b/examples/quickstart-cpp/include/linear_algebra_util.h
@@ -8,17 +8,21 @@
 #include
 
 class LinearAlgebraUtil {
- public:
-  static std::vector<double> subtract_vector(std::vector<double> v1, std::vector<double> v2);
+public:
+  static std::vector<double> subtract_vector(std::vector<double> v1,
+                                             std::vector<double> v2);
 
-  static std::vector<double> multiply_matrix_vector(std::vector<std::vector<double>> mat, std::vector<double> v);
+  static std::vector<double>
+  multiply_matrix_vector(std::vector<std::vector<double>> mat,
+                         std::vector<double> v);
 
   static std::vector<double> add_vector_scalar(std::vector<double> v, double a);
 
-  static std::vector<double> multiply_vector_scalar(std::vector<double> v, double a);
-
-  static std::vector<std::vector<double>> transpose_vector(std::vector<std::vector<double>> v);
+  static std::vector<double>
multiply_vector_scalar(std::vector v, + double a); + static std::vector> + transpose_vector(std::vector> v); }; -#endif //FLOWER_CPPV2_LINEAR_ALGEBRA_UTIL_H +#endif // FLOWER_CPPV2_LINEAR_ALGEBRA_UTIL_H diff --git a/examples/quickstart-cpp/include/simple_client.h b/examples/quickstart-cpp/include/simple_client.h index 894ecb26738..5997fbe1bda 100644 --- a/examples/quickstart-cpp/include/simple_client.h +++ b/examples/quickstart-cpp/include/simple_client.h @@ -11,40 +11,40 @@ * ********************************************************************************************************/ #pragma once #include "client.h" -#include "synthetic_dataset.h" #include "line_fit_model.h" +#include "synthetic_dataset.h" #include +#include +#include #include +#include #include #include -#include -#include #include -#include /** * Validate the network on the entire test set * */ class SimpleFlwrClient : public flwr_local::Client { - public: - SimpleFlwrClient(std::string client_id, - LineFitModel &model, +public: + SimpleFlwrClient(std::string client_id, LineFitModel &model, SyntheticDataset &training_dataset, SyntheticDataset &validation_dataset, SyntheticDataset &test_dataset); void set_parameters(flwr_local::Parameters params); virtual flwr_local::ParametersRes get_parameters() override; - virtual flwr_local::PropertiesRes get_properties(flwr_local::PropertiesIns ins) override; - virtual flwr_local::EvaluateRes evaluate(flwr_local::EvaluateIns ins) override; + virtual flwr_local::PropertiesRes + get_properties(flwr_local::PropertiesIns ins) override; + virtual flwr_local::EvaluateRes + evaluate(flwr_local::EvaluateIns ins) override; virtual flwr_local::FitRes fit(flwr_local::FitIns ins) override; - private: +private: int64_t client_id; LineFitModel &model; SyntheticDataset &training_dataset; SyntheticDataset &validation_dataset; SyntheticDataset &test_dataset; - }; diff --git a/examples/quickstart-cpp/include/synthetic_dataset.h b/examples/quickstart-cpp/include/synthetic_dataset.h index 07480bf9fea..ff37829d56e 100644 --- a/examples/quickstart-cpp/include/synthetic_dataset.h +++ b/examples/quickstart-cpp/include/synthetic_dataset.h @@ -5,11 +5,12 @@ #ifndef FLOWER_CPP_SYNTHETIC_DATASET_H #define FLOWER_CPP_SYNTHETIC_DATASET_H -#include #include +#include class SyntheticDataset { - public: - // Generates the synthetic dataset of size size around given vector m of size ms_size and given bias b. +public: + // Generates the synthetic dataset of size size around given vector m of size + // ms_size and given bias b. SyntheticDataset(std::vector ms, double b, size_t size); // Returns the size of the dataset. 
@@ -20,7 +21,7 @@ class SyntheticDataset { int get_features_count(); - private: +private: std::vector ms; double b; @@ -30,4 +31,4 @@ class SyntheticDataset { std::vector> data_points; }; -#endif //FLOWER_CPP_SYNTHETIC_DATASET_H +#endif // FLOWER_CPP_SYNTHETIC_DATASET_H diff --git a/examples/quickstart-cpp/src/line_fit_model.cc b/examples/quickstart-cpp/src/line_fit_model.cc index db6dcabde5f..cec43b7b279 100644 --- a/examples/quickstart-cpp/src/line_fit_model.cc +++ b/examples/quickstart-cpp/src/line_fit_model.cc @@ -5,141 +5,143 @@ #include "line_fit_model.h" #include "synthetic_dataset.h" -#include -#include #include +#include #include +#include -LineFitModel::LineFitModel(int num_iterations, double learning_rate, int num_params) - : num_iterations(num_iterations), learning_rate(learning_rate) { - std::random_device rd; - std::mt19937 mt(rd()); - std::uniform_int_distribution<> distr(-10.0, 10.0); - for (int i = 0; i < num_params; i++) { - this->pred_weights.push_back(distr(mt)); - } - - this->pred_b = 0.0; - this->batch_size = 64; +LineFitModel::LineFitModel(int num_iterations, double learning_rate, + int num_params) + : num_iterations(num_iterations), learning_rate(learning_rate) { + std::random_device rd; + std::mt19937 mt(rd()); + std::uniform_int_distribution<> distr(-10.0, 10.0); + for (int i = 0; i < num_params; i++) { + this->pred_weights.push_back(distr(mt)); + } + + this->pred_b = 0.0; + this->batch_size = 64; } std::vector LineFitModel::get_pred_weights() { - std::vector copy_of_weights(this->pred_weights); - return copy_of_weights; + std::vector copy_of_weights(this->pred_weights); + return copy_of_weights; } void LineFitModel::set_pred_weights(std::vector new_weights) { - this->pred_weights.assign(new_weights.begin(), new_weights.end()); + this->pred_weights.assign(new_weights.begin(), new_weights.end()); } -double LineFitModel::get_bias() { - return this->pred_b; -} -void LineFitModel::set_bias(double new_bias) { - this->pred_b = new_bias; -} +double LineFitModel::get_bias() { return this->pred_b; } +void LineFitModel::set_bias(double new_bias) { this->pred_b = new_bias; } -size_t LineFitModel::get_model_size() { - return this->pred_weights.size(); -}; +size_t LineFitModel::get_model_size() { return this->pred_weights.size(); }; std::vector LineFitModel::predict(std::vector> X) { - std::vector prediction(X.size(), 0.0); - for (int i = 0; i < X.size(); i++) { - for (int j = 0; j < X[i].size(); j++) { - prediction[i] += this->pred_weights[j] * X[i][j]; - } - prediction[i] += this->pred_b; + std::vector prediction(X.size(), 0.0); + for (int i = 0; i < X.size(); i++) { + for (int j = 0; j < X[i].size(); j++) { + prediction[i] += this->pred_weights[j] * X[i][j]; } + prediction[i] += this->pred_b; + } - return prediction; + return prediction; } -std::tuple LineFitModel::train_SGD(SyntheticDataset &dataset) { - int features = dataset.get_features_count(); - std::vector> data_points = dataset.get_data_points(); - - std::vector data_indices(dataset.size()); - for (int i = 0; i < dataset.size(); i++) { - data_indices.push_back(i); +std::tuple +LineFitModel::train_SGD(SyntheticDataset &dataset) { + int features = dataset.get_features_count(); + std::vector> data_points = dataset.get_data_points(); + + std::vector data_indices(dataset.size()); + for (int i = 0; i < dataset.size(); i++) { + data_indices.push_back(i); + } + + std::vector dW(features); + std::vector err(batch_size, 10000); + std::vector pW(features); + double training_error = 0.0; + for (int iteration = 0; iteration < 
num_iterations; iteration++) { + std::random_device rd; + std::mt19937 g(rd()); + std::shuffle(data_indices.begin(), data_indices.end(), g); + + std::vector> X(this->batch_size, + std::vector(features)); + std::vector y(this->batch_size); + + for (int i = 0; i < this->batch_size; i++) { + std::vector point = data_points[data_indices[i]]; + y[i] = point.back(); + point.pop_back(); + X[i] = point; } - std::vector dW(features); - std::vector err(batch_size, 10000); - std::vector pW(features); - double training_error = 0.0; - for (int iteration = 0; iteration < num_iterations; iteration++) { - std::random_device rd; - std::mt19937 g(rd()); - std::shuffle(data_indices.begin(), data_indices.end(), g); + pW = this->pred_weights; + double pB = this->pred_b; + double dB; - std::vector> X(this->batch_size, std::vector(features)); - std::vector y(this->batch_size); + std::vector pred = predict(X); - for (int i = 0; i < this->batch_size; i++) { - std::vector point = data_points[data_indices[i]]; - y[i] = point.back(); - point.pop_back(); - X[i] = point; - } + err = LinearAlgebraUtil::subtract_vector(y, pred); - pW = this->pred_weights; - double pB = this->pred_b; - double dB; + dW = LinearAlgebraUtil::multiply_matrix_vector( + LinearAlgebraUtil::transpose_vector(X), err); + dW = LinearAlgebraUtil::multiply_vector_scalar(dW, + (-2.0 / this->batch_size)); - std::vector pred = predict(X); + dB = (-2.0 / this->batch_size) * + std::accumulate(err.begin(), err.end(), 0.0); - err = LinearAlgebraUtil::subtract_vector(y, pred); + this->pred_weights = LinearAlgebraUtil::subtract_vector( + pW, LinearAlgebraUtil::multiply_vector_scalar(dW, learning_rate)); + this->pred_b = pB - learning_rate * dB; - dW = LinearAlgebraUtil::multiply_matrix_vector(LinearAlgebraUtil::transpose_vector(X), err); - dW = LinearAlgebraUtil::multiply_vector_scalar(dW, (-2.0 / this->batch_size)); - - dB = (-2.0 / this->batch_size) * std::accumulate(err.begin(), err.end(), 0.0); - - this->pred_weights = - LinearAlgebraUtil::subtract_vector(pW, LinearAlgebraUtil::multiply_vector_scalar(dW, learning_rate)); - this->pred_b = pB - learning_rate * dB; - - if (iteration % 250 == 0) { - training_error = this->compute_mse(y, predict(X)); - std::cout << "Iteration: " << iteration << " Training error: " << training_error << '\n'; - } - - } - std::cout <<"Local model:" << std::endl; - for (size_t i = 0; i < pred_weights.size(); i++) { - std::cout << " m" << i <<"_local = "<< std::fixed << pred_weights[i] << std::endl; + if (iteration % 250 == 0) { + training_error = this->compute_mse(y, predict(X)); + std::cout << "Iteration: " << iteration + << " Training error: " << training_error << '\n'; } - std::cout << " b_local = "<< std::fixed << pred_b << std::endl < #include +#include -std::vector LinearAlgebraUtil::subtract_vector(std::vector v1, std::vector v2) { - std::vector result(v1.size()); - for (int i = 0; i < v1.size(); i++) { - result[i] = v1[i] - v2[i]; - } - return result; +std::vector LinearAlgebraUtil::subtract_vector(std::vector v1, + std::vector v2) { + std::vector result(v1.size()); + for (int i = 0; i < v1.size(); i++) { + result[i] = v1[i] - v2[i]; + } + return result; } std::vector -LinearAlgebraUtil::multiply_matrix_vector(std::vector> mat, std::vector v) { - std::vector result(mat.size(), 0.0); - for (int i = 0; i < mat.size(); i++) { - result[i] = 0; - for (int j = 0; j < mat[0].size(); j++) { - result[i] += mat[i][j] * v[j]; - } +LinearAlgebraUtil::multiply_matrix_vector(std::vector> mat, + std::vector v) { + std::vector 
result(mat.size(), 0.0); + for (int i = 0; i < mat.size(); i++) { + result[i] = 0; + for (int j = 0; j < mat[0].size(); j++) { + result[i] += mat[i][j] * v[j]; } - return result; + } + return result; } -std::vector LinearAlgebraUtil::add_vector_scalar(std::vector v, double a) { - for (int i = 0; i < v.size(); i++) { - v[i] += a; - } - return v; +std::vector LinearAlgebraUtil::add_vector_scalar(std::vector v, + double a) { + for (int i = 0; i < v.size(); i++) { + v[i] += a; + } + return v; } -std::vector LinearAlgebraUtil::multiply_vector_scalar(std::vector v, double a) { - for (int i = 0; i < v.size(); i++) { - v[i] *= a; - } +std::vector +LinearAlgebraUtil::multiply_vector_scalar(std::vector v, double a) { + for (int i = 0; i < v.size(); i++) { + v[i] *= a; + } - return v; + return v; } -std::vector> LinearAlgebraUtil::transpose_vector(std::vector> v) { - std::vector> vT(v[0].size(), std::vector(v.size())); - for (int i = 0; i < v.size(); i++) { - for (int j = 0; j < v[0].size(); j++) { - vT[j][i] = v[i][j]; - } +std::vector> +LinearAlgebraUtil::transpose_vector(std::vector> v) { + std::vector> vT(v[0].size(), + std::vector(v.size())); + for (int i = 0; i < v.size(); i++) { + for (int j = 0; j < v[0].size(); j++) { + vT[j][i] = v[i][j]; } + } - return vT; + return vT; } diff --git a/examples/quickstart-cpp/src/simple_client.cc b/examples/quickstart-cpp/src/simple_client.cc index c246722ed15..2645401a27a 100644 --- a/examples/quickstart-cpp/src/simple_client.cc +++ b/examples/quickstart-cpp/src/simple_client.cc @@ -2,72 +2,75 @@ /** * Initializer */ -SimpleFlwrClient::SimpleFlwrClient(std::string client_id, - LineFitModel &model, +SimpleFlwrClient::SimpleFlwrClient(std::string client_id, LineFitModel &model, SyntheticDataset &training_dataset, SyntheticDataset &validation_dataset, SyntheticDataset &test_dataset) - : model(model), - training_dataset(training_dataset), - validation_dataset(validation_dataset), - test_dataset(test_dataset) { + : model(model), training_dataset(training_dataset), + validation_dataset(validation_dataset), test_dataset(test_dataset){ -}; + }; /** * Return the current local model parameters - * Simple string are used for now to test communication, needs updates in the future + * Simple string are used for now to test communication, needs updates in the + * future */ flwr_local::ParametersRes SimpleFlwrClient::get_parameters() { - // Serialize - std::vector pred_weights = this->model.get_pred_weights(); - double pred_b = this->model.get_bias(); - std::list tensors; + // Serialize + std::vector pred_weights = this->model.get_pred_weights(); + double pred_b = this->model.get_bias(); + std::list tensors; - std::ostringstream oss1, oss2; // Possibly unnecessary - oss1.write(reinterpret_cast(pred_weights.data()), pred_weights.size() * sizeof(double)); - tensors.push_back(oss1.str()); + std::ostringstream oss1, oss2; // Possibly unnecessary + oss1.write(reinterpret_cast(pred_weights.data()), + pred_weights.size() * sizeof(double)); + tensors.push_back(oss1.str()); - oss2.write(reinterpret_cast(&pred_b), sizeof(double)); - tensors.push_back(oss2.str()); + oss2.write(reinterpret_cast(&pred_b), sizeof(double)); + tensors.push_back(oss2.str()); - std::string tensor_str = "cpp_double"; - return flwr_local::ParametersRes(flwr_local::Parameters(tensors, tensor_str)); + std::string tensor_str = "cpp_double"; + return flwr_local::ParametersRes(flwr_local::Parameters(tensors, tensor_str)); }; void SimpleFlwrClient::set_parameters(flwr_local::Parameters params) { - std::list s = 
params.getTensors(); - std::cout << "Received " << s.size() <<" Layers from server:" << std::endl; - - if (s.empty() == 0) { - // Layer 1 - auto layer = s.begin(); - size_t num_bytes = (*layer).size(); - const char *weights_char = (*layer).c_str(); - const double *weights_double = reinterpret_cast(weights_char); - std::vector weights(weights_double, weights_double + num_bytes / sizeof(double)); - this->model.set_pred_weights(weights); - for (auto x : this->model.get_pred_weights()) - for (size_t j=0; j < this->model.get_pred_weights().size(); j++) - std::cout << " m"<< j <<"_server = " << std::fixed << this->model.get_pred_weights()[j] << std::endl; - - // Layer 2 = Bias - auto layer_2 = std::next(layer, 1); - num_bytes = (*layer_2).size(); - const char *bias_char = (*layer_2).c_str(); - const double *bias_double = reinterpret_cast(bias_char); - this->model.set_bias(bias_double[0]); - std::cout << " b_server = " << std::fixed << this->model.get_bias() << std::endl; - - } - + std::list s = params.getTensors(); + std::cout << "Received " << s.size() << " Layers from server:" << std::endl; + + if (s.empty() == 0) { + // Layer 1 + auto layer = s.begin(); + size_t num_bytes = (*layer).size(); + const char *weights_char = (*layer).c_str(); + const double *weights_double = + reinterpret_cast(weights_char); + std::vector weights(weights_double, + weights_double + num_bytes / sizeof(double)); + this->model.set_pred_weights(weights); + for (auto x : this->model.get_pred_weights()) + for (size_t j = 0; j < this->model.get_pred_weights().size(); j++) + std::cout << " m" << j << "_server = " << std::fixed + << this->model.get_pred_weights()[j] << std::endl; + + // Layer 2 = Bias + auto layer_2 = std::next(layer, 1); + num_bytes = (*layer_2).size(); + const char *bias_char = (*layer_2).c_str(); + const double *bias_double = reinterpret_cast(bias_char); + this->model.set_bias(bias_double[0]); + std::cout << " b_server = " << std::fixed << this->model.get_bias() + << std::endl; + } }; -flwr_local::PropertiesRes SimpleFlwrClient::get_properties(flwr_local::PropertiesIns ins) { - flwr_local::PropertiesRes p; - p.setPropertiesRes(static_cast(ins.getPropertiesIns())); - return p; +flwr_local::PropertiesRes +SimpleFlwrClient::get_properties(flwr_local::PropertiesIns ins) { + flwr_local::PropertiesRes p; + p.setPropertiesRes( + static_cast(ins.getPropertiesIns())); + return p; } /** @@ -75,41 +78,43 @@ flwr_local::PropertiesRes SimpleFlwrClient::get_properties(flwr_local::Propertie * Simple settings are used for testing, needs updates in the future */ flwr_local::FitRes SimpleFlwrClient::fit(flwr_local::FitIns ins) { - std::cout << "Fitting..." << std::endl; - flwr_local::FitRes resp; + std::cout << "Fitting..." << std::endl; + flwr_local::FitRes resp; - flwr_local::Parameters p = ins.getParameters(); - this->set_parameters(p); + flwr_local::Parameters p = ins.getParameters(); + this->set_parameters(p); - std::tuple result = this->model.train_SGD(this->training_dataset); + std::tuple result = + this->model.train_SGD(this->training_dataset); - resp.setParameters(this->get_parameters().getParameters()); - resp.setNum_example(std::get<0>(result)); + resp.setParameters(this->get_parameters().getParameters()); + resp.setNum_example(std::get<0>(result)); - return resp; + return resp; }; /** * Evaluate the provided weights using the locally held dataset * Needs updates in the future */ -flwr_local::EvaluateRes SimpleFlwrClient::evaluate(flwr_local::EvaluateIns ins) { - std::cout << "Evaluating..." 
<< std::endl; - flwr_local::EvaluateRes resp; - flwr_local::Parameters p = ins.getParameters(); - this->set_parameters(p); - - // Evaluation returns a number_of_examples, a loss and an "accuracy" - std::tuple result = this->model.evaluate(this->test_dataset); - - resp.setNum_example(std::get<0>(result)); - resp.setLoss(std::get<1>(result)); - - flwr_local::Scalar loss_metric = flwr_local::Scalar(); - loss_metric.setDouble(std::get<2>(result)); - std::map metric = {{"loss", loss_metric}}; - resp.setMetrics(metric); - - return resp; - +flwr_local::EvaluateRes +SimpleFlwrClient::evaluate(flwr_local::EvaluateIns ins) { + std::cout << "Evaluating..." << std::endl; + flwr_local::EvaluateRes resp; + flwr_local::Parameters p = ins.getParameters(); + this->set_parameters(p); + + // Evaluation returns a number_of_examples, a loss and an "accuracy" + std::tuple result = + this->model.evaluate(this->test_dataset); + + resp.setNum_example(std::get<0>(result)); + resp.setLoss(std::get<1>(result)); + + flwr_local::Scalar loss_metric = flwr_local::Scalar(); + loss_metric.setDouble(std::get<2>(result)); + std::map metric = {{"loss", loss_metric}}; + resp.setMetrics(metric); + + return resp; }; diff --git a/examples/quickstart-cpp/src/synthetic_dataset.cc b/examples/quickstart-cpp/src/synthetic_dataset.cc index 56aa9570125..8ba5884ce8f 100644 --- a/examples/quickstart-cpp/src/synthetic_dataset.cc +++ b/examples/quickstart-cpp/src/synthetic_dataset.cc @@ -6,58 +6,56 @@ #include #include -#include #include +#include -SyntheticDataset::SyntheticDataset(std::vector ms, double b, size_t size) { - std::random_device rd; - std::mt19937 mt(rd()); - std::uniform_int_distribution<> distr(-10.0, 10.0); - std::cout << "True parameters: " << std::endl; - for (int i = 0; i < ms.size(); i++) { - std::cout << std::fixed << " m" << i << " = " << ms[i] << std::endl; - } +SyntheticDataset::SyntheticDataset(std::vector ms, double b, + size_t size) { + std::random_device rd; + std::mt19937 mt(rd()); + std::uniform_int_distribution<> distr(-10.0, 10.0); + std::cout << "True parameters: " << std::endl; + for (int i = 0; i < ms.size(); i++) { + std::cout << std::fixed << " m" << i << " = " << ms[i] << std::endl; + } - std::cout << " b = " << std::fixed << b << std::endl; + std::cout << " b = " << std::fixed << b << std::endl; - std::vector> xs(size, std::vector(ms.size())); - std::vector ys(size, 0); - for (int m_ind = 0; m_ind < ms.size(); m_ind++) { - std::uniform_real_distribution distx(-10.0, 10.0); + std::vector> xs(size, std::vector(ms.size())); + std::vector ys(size, 0); + for (int m_ind = 0; m_ind < ms.size(); m_ind++) { + std::uniform_real_distribution distx(-10.0, 10.0); - for (int i = 0; i < size; i++) { - xs[i][m_ind] = distx(mt); - } + for (int i = 0; i < size; i++) { + xs[i][m_ind] = distx(mt); } + } - for (int i = 0; i < size; i++) { - ys[i] = b; - for (int m_ind = 0; m_ind < ms.size(); m_ind++) { - ys[i] += ms[m_ind] * xs[i][m_ind]; - } + for (int i = 0; i < size; i++) { + ys[i] = b; + for (int m_ind = 0; m_ind < ms.size(); m_ind++) { + ys[i] += ms[m_ind] * xs[i][m_ind]; } + } - std::vector> data_points; - for (int i = 0; i < size; i++) { - std::vector data_point; - data_point.insert(data_point.end(), xs[i].begin(), xs[i].end()); - data_point.push_back(ys[i]); + std::vector> data_points; + for (int i = 0; i < size; i++) { + std::vector data_point; + data_point.insert(data_point.end(), xs[i].begin(), xs[i].end()); + data_point.push_back(ys[i]); - data_points.push_back(data_point); - } + 
    data_points.push_back(data_point);
+  }
 
-    this->data_points = data_points;
+  this->data_points = data_points;
 }
 
-size_t SyntheticDataset::size() {
-    return this->data_points.size();
-}
+size_t SyntheticDataset::size() { return this->data_points.size(); }
 
 int SyntheticDataset::get_features_count() {
-    return this->data_points[0].size() - 1;
+  return this->data_points[0].size() - 1;
 }
 
 std::vector<std::vector<double>> SyntheticDataset::get_data_points() {
-    return this->data_points;
+  return this->data_points;
 }
-
diff --git a/src/cc/flwr/include/serde.h b/src/cc/flwr/include/serde.h
index 21cc8458e96..8b9d809d7c8 100644
--- a/src/cc/flwr/include/serde.h
+++ b/src/cc/flwr/include/serde.h
@@ -13,14 +13,10 @@
  *
  ********************************************************************************************************/
 #pragma once
-// cppcheck-suppress missingInclude
-#include "flwr/proto/transport.grpc.pb.h"
-// cppcheck-suppress missingInclude
-#include "flwr/proto/transport.pb.h"
-// cppcheck-suppress missingInclude
 #include "flwr/proto/fleet.grpc.pb.h"
-// cppcheck-suppress missingInclude
 #include "flwr/proto/fleet.pb.h"
+#include "flwr/proto/transport.grpc.pb.h"
+#include "flwr/proto/transport.pb.h"
 #include "typing.h"
 
 /**
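
Reading aid for LineFitModel::train_SGD in examples/quickstart-cpp/src/line_fit_model.cc: each iteration shuffles the data indices, takes a mini-batch of size B (batch_size, 64 here), predicts yhat = Xw + b, and applies a plain least-squares gradient step with learning rate eta. In the notation below (X is the B-by-d batch matrix, y the targets, w the weights, b the bias), the update the LinearAlgebraUtil calls implement is

\[
e = y - \hat{y}, \qquad
\nabla_w = -\frac{2}{B}\, X^{\top} e, \qquad
\nabla_b = -\frac{2}{B} \sum_{i=1}^{B} e_i, \qquad
w \leftarrow w - \eta\, \nabla_w, \qquad
b \leftarrow b - \eta\, \nabla_b .
\]

The shuffling, batching, and the MSE logged every 250 iterations are the only things wrapped around this update.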
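
The parameter exchange in examples/quickstart-cpp/src/simple_client.cc is a raw-byte convention: get_parameters() writes the double weights (and, as a second tensor, the bias) into std::string buffers via reinterpret_cast and tags them with tensor_str = "cpp_double"; set_parameters() casts the received bytes back into doubles. Below is a minimal, self-contained sketch of that round trip. The pack_doubles/unpack_doubles helpers are illustrative only (they are not part of the Flower C++ SDK), and, like the example itself, the scheme assumes client and server share the same double representation and endianness.

#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Pack the raw IEEE-754 bytes of each double into a byte-string "tensor",
// mirroring what SimpleFlwrClient::get_parameters() streams out.
static std::string pack_doubles(const std::vector<double> &values) {
  std::string bytes(values.size() * sizeof(double), '\0');
  std::memcpy(&bytes[0], values.data(), bytes.size());
  return bytes;
}

// Reinterpret the byte string back into doubles, mirroring what
// SimpleFlwrClient::set_parameters() does with each received layer.
static std::vector<double> unpack_doubles(const std::string &bytes) {
  std::vector<double> values(bytes.size() / sizeof(double));
  std::memcpy(values.data(), bytes.data(), values.size() * sizeof(double));
  return values;
}

int main() {
  std::vector<double> weights = {0.5, -1.25, 3.0}; // stand-in for pred_weights
  std::string tensor = pack_doubles(weights);      // one entry of Parameters::tensors
  std::vector<double> round_trip = unpack_doubles(tensor);
  for (double w : round_trip)
    std::cout << w << '\n'; // prints 0.5, -1.25, 3 again
  return 0;
}

Using std::memcpy instead of writing through a std::ostringstream (as the example does) skips the intermediate stream but produces the same byte layout on the wire.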