diff --git a/applications/mixed-precision/dnn/fit_sin.cpp b/applications/mixed-precision/dnn/fit_sin.cpp
index 31a149f33..b3a34a48e 100644
--- a/applications/mixed-precision/dnn/fit_sin.cpp
+++ b/applications/mixed-precision/dnn/fit_sin.cpp
@@ -82,28 +82,11 @@ void SinFunctionFit() {
 	std::cout << "Result : y = " << a << " + " << b << "x + " << c << "x^2 + " << d << "x^3\n";
 }
 
-// Regression testing guards: typically set by the cmake configuration, but MANUAL_TESTING is an override
-#define MANUAL_TESTING 0
-// REGRESSION_LEVEL_OVERRIDE is set by the cmake file to drive a specific regression intensity
-// It is the responsibility of the regression test to organize the tests in a quartile progression.
-//#undef REGRESSION_LEVEL_OVERRIDE
-#ifndef REGRESSION_LEVEL_OVERRIDE
-#undef REGRESSION_LEVEL_1
-#undef REGRESSION_LEVEL_2
-#undef REGRESSION_LEVEL_3
-#undef REGRESSION_LEVEL_4
-#define REGRESSION_LEVEL_1 1
-#define REGRESSION_LEVEL_2 1
-#define REGRESSION_LEVEL_3 1
-#define REGRESSION_LEVEL_4 1
-#endif
-
 int main()
 try {
 	using namespace sw::universal;
 
-#if MANUAL_TESTING
 	using bf16 = cfloat<16, 8, uint16_t, true, true, false>;
 	SinFunctionFit();	// Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
 	SinFunctionFit();	// Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
@@ -129,25 +112,7 @@ try {
 	SinFunctionFit();	// Result : y = 0.1230070 + 0.434995x + 0.5860130x^2 + 0.2949960x^3: loss = 15.9973
 	SinFunctionFit();	// Result : y = 0 + 0x + 0.585988x^2 + 0x^3 : loss = 1.99992
 
-#else
-
-#if REGRESSION_LEVEL_1
-	SinFunctionFit();	// Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
-#endif
-
-#if REGRESSION_LEVEL_2
-
-#endif
-
-#if REGRESSION_LEVEL_3
-
-#endif
-
-#if REGRESSION_LEVEL_4
-
-#endif
-#endif
 	return EXIT_SUCCESS;
 }
 catch (char const* msg) {
diff --git a/applications/mixed-precision/dnn/rbf.cpp b/applications/mixed-precision/dnn/rbf.cpp
new file mode 100644
index 000000000..78d624276
--- /dev/null
+++ b/applications/mixed-precision/dnn/rbf.cpp
@@ -0,0 +1,49 @@
+// Copyright (C) 2017 Stillwater Supercomputing, Inc.
+// SPDX-License-Identifier: MIT
+//
+// This file is part of the universal numbers project, which is released under an MIT Open Source license.
+#include
+
+#include
+
+/*
+Image Preprocessing Precondition:
+  - Load images and convert them to a suitable format (e.g., grayscale or feature vectors).
+  - Normalize pixel values to a common range (e.g., 0-1).
+
+RBF Network Training Steps:
+  - Center Selection:
+      Choose a subset of training images as centers for the RBF units.
+  - Width Parameter:
+      Determine the width parameter (sigma) for the RBF functions, controlling their influence.
+  - Weight Training:
+      Use a supervised learning algorithm (e.g., gradient descent) to adjust the weights connecting
+      the RBF layer to the output layer.
+
+Image Classification Postcondition:
+  - For a new image, calculate the activation of each RBF unit based on its distance from the centers.
+  - Feed the activations to the output layer and use a decision rule (e.g., maximum activation) to determine the class.
+*/
+
+
+#include
+#include
+
+
+int main() {
+	// Load and preprocess images
+	// ...
+
+
+
+	// Create RBF network
+
+
+	// Train the network
+
+
+	// Test the network
+
+
+	return 0;
+}
\ No newline at end of file
diff --git a/include/universal/dnn/dnn.hpp b/include/universal/dnn/dnn.hpp
index 9fc20885b..10c9c2960 100644
--- a/include/universal/dnn/dnn.hpp
+++ b/include/universal/dnn/dnn.hpp
@@ -3,7 +3,8 @@
 // Super-simple DNN implementation to aid the application,
 // numerical, and reproducibility examples.
 //
-// Copyright (C) 2021 Stillwater Supercomputing, Inc.
+// Copyright (C) 2017 Stillwater Supercomputing, Inc.
+// SPDX-License-Identifier: MIT
 //
 // This file is part of the universal numbers project, which is released under an MIT Open Source license.
 #ifndef _UNIVERSAL_DNN_LIBRARY
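The new rbf.cpp describes the classic RBF recipe in its file comment but leaves main() as a stub. For reference, the following is a minimal self-contained sketch of those three training steps in plain C++ (double precision throughout). The RbfNet type and its members are hypothetical illustrations, not part of the universal dnn API: center selection takes every training sample as a center, sigma is the shared Gaussian width, and the output weights are trained by gradient descent on the squared error.

// Minimal RBF-network sketch illustrating the steps described in rbf.cpp:
// center selection, a width parameter sigma, and gradient-descent weight training.
// The RbfNet name and interface are illustrative only.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

using Vec = std::vector<double>;

double squaredDistance(const Vec& a, const Vec& b) {
	double d = 0.0;
	for (std::size_t i = 0; i < a.size(); ++i) {
		double diff = a[i] - b[i];
		d += diff * diff;
	}
	return d;
}

struct RbfNet {
	std::vector<Vec> centers;  // one RBF unit per center
	double sigma;              // shared width parameter
	Vec weights;               // linear output layer, one weight per unit

	// Gaussian activation of each RBF unit for input x
	Vec activations(const Vec& x) const {
		Vec phi(centers.size());
		for (std::size_t j = 0; j < centers.size(); ++j)
			phi[j] = std::exp(-squaredDistance(x, centers[j]) / (2.0 * sigma * sigma));
		return phi;
	}

	// network output: weighted sum of the unit activations
	double predict(const Vec& x) const {
		Vec phi = activations(x);
		double y = 0.0;
		for (std::size_t j = 0; j < weights.size(); ++j) y += weights[j] * phi[j];
		return y;
	}

	// gradient descent on the squared error, adjusting only the output weights
	void train(const std::vector<Vec>& X, const Vec& t, double lr, int epochs) {
		for (int e = 0; e < epochs; ++e) {
			for (std::size_t i = 0; i < X.size(); ++i) {
				Vec phi = activations(X[i]);
				double err = predict(X[i]) - t[i];
				for (std::size_t j = 0; j < weights.size(); ++j)
					weights[j] -= lr * err * phi[j];
			}
		}
	}
};

int main() {
	// toy 1-D dataset: learn y = x^2 on a few samples
	std::vector<Vec> X = { {-2.0}, {-1.0}, {0.0}, {1.0}, {2.0} };
	Vec t = { 4.0, 1.0, 0.0, 1.0, 4.0 };

	RbfNet net;
	net.centers = X;                    // center selection: every training sample is a center
	net.sigma = 1.0;                    // width parameter
	net.weights.assign(X.size(), 0.0);  // output weights start at zero

	net.train(X, t, 0.1, 500);          // weight training by gradient descent

	for (const Vec& x : X)
		std::cout << x[0] << " -> " << net.predict(x) << '\n';
	return 0;
}

Since this application lives under applications/mixed-precision, the natural experiment would be to swap the double in the Vec alias for a universal number type such as the bf16 alias used in fit_sin.cpp and compare the trained weights and loss across precisions.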