Skip to content

Commit

Permalink
WIP: skeleton for RBF
Browse files Browse the repository at this point in the history
  • Loading branch information
Ravenwater committed Nov 16, 2024
1 parent 3f5e8ee commit 8ce093b
Show file tree
Hide file tree
Showing 3 changed files with 51 additions and 36 deletions.
35 changes: 0 additions & 35 deletions applications/mixed-precision/dnn/fit_sin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,28 +82,11 @@ void SinFunctionFit() {
std::cout << "Result : y = " << a << " + " << b << "x + " << c << "x^2 + " << d << "x^3\n";
}

// Regression testing guards: typically set by the cmake configuration, but MANUAL_TESTING is an override
#define MANUAL_TESTING 0
// REGRESSION_LEVEL_OVERRIDE is set by the cmake file to drive a specific regression intensity
// It is the responsibility of the regression test to organize the tests in a quartile progression.
//#undef REGRESSION_LEVEL_OVERRIDE
#ifndef REGRESSION_LEVEL_OVERRIDE
#undef REGRESSION_LEVEL_1
#undef REGRESSION_LEVEL_2
#undef REGRESSION_LEVEL_3
#undef REGRESSION_LEVEL_4
#define REGRESSION_LEVEL_1 1
#define REGRESSION_LEVEL_2 1
#define REGRESSION_LEVEL_3 1
#define REGRESSION_LEVEL_4 1
#endif

int main()
try {
using namespace sw::universal;


#if MANUAL_TESTING
using bf16 = cfloat<16, 8, uint16_t, true, true, false>;
SinFunctionFit<float>(); // Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
SinFunctionFit<fp32>(); // Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
Expand All @@ -129,25 +112,7 @@ try {
SinFunctionFit<l16_12>(); // Result : y = 0.1230070 + 0.434995x + 0.5860130x^2 + 0.2949960x^3: loss = 15.9973
SinFunctionFit<l16_14>(); // Result : y = 0 + 0x + 0.585988x^2 + 0x^3 : loss = 1.99992

#else

#if REGRESSION_LEVEL_1
SinFunctionFit<float>(); // Result : y = -0.2031700 + 0.800356x + -0.0207303x^2 + -0.0852961x^3: loss = 13.1245
#endif

#if REGRESSION_LEVEL_2

#endif

#if REGRESSION_LEVEL_3

#endif

#if REGRESSION_LEVEL_4

#endif

#endif
return EXIT_SUCCESS;
}
catch (char const* msg) {
Expand Down
49 changes: 49 additions & 0 deletions applications/mixed-precision/dnn/rbf.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
// Copyright (C) 2017 Stillwater Supercomputing, Inc.
// SPDX-License-Identifier: MIT
//
// This file is part of the universal numbers project, which is released under an MIT Open Source license.
#include <universal/utility/directives.hpp>

#include <universal/dnn/dnn.hpp>

/*
Image Preprocessing Precondition:
- Load images and convert them to a suitable format (e.g., grayscale or feature vectors).
- Normalize pixel values to a common range (e.g., 0-1).
RBF Network Training Steps:
- Center Selection:
Choose a subset of training images as centers for the RBF units.
- Width Parameter:
Determine the width parameter (sigma) for the RBF functions, controlling their influence.
- Weight Training:
Use a supervised learning algorithm (e.g., gradient descent) to adjust the weights connecting
the RBF layer to the output layer.
Image Classification Postcondition:
- For a new image, calculate the activation of each RBF unit based on its distance from the centers.
- Feed the activations to the output layer and use a decision rule (e.g., maximum activation) to determine the class.
*/


#include <vector>
#include <cmath>


int main() {
	// Skeleton driver for the RBF-network image-classification example.
	// Each stage below is a placeholder to be filled in as the RBF
	// implementation in universal/dnn matures.

	// TODO(1): load the image set and preprocess it
	//          (convert to feature vectors, normalize pixel values)

	// TODO(2): construct the RBF network
	//          (select centers, choose the width parameter sigma)

	// TODO(3): train the output-layer weights on the training set

	// TODO(4): evaluate classification accuracy on the test set

	return 0;
}
3 changes: 2 additions & 1 deletion include/universal/dnn/dnn.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@
// Super-simple DNN implementation to aid the application,
// numerical, and reproducibility examples.
//
// Copyright (C) 2021 Stillwater Supercomputing, Inc.
// Copyright (C) 2017 Stillwater Supercomputing, Inc.
// SPDX-License-Identifier: MIT
//
// This file is part of the universal numbers project, which is released under an MIT Open Source license.
#ifndef _UNIVERSAL_DNN_LIBRARY
Expand Down

0 comments on commit 8ce093b

Please sign in to comment.