Skip to content
This repository has been archived by the owner on Nov 18, 2021. It is now read-only.

Commit

Permalink
Feature/blas refactor (#983)
Browse files Browse the repository at this point in the history
* Creating BLAS routines for tensor

* bugfix for embeddings

* tidying and style

* style

* fixed bug - failure to zero out accumulated gradients

* removed unnecessary copies & minor refactor

* style

* updated embeddings backward test to check gradients zeroed appropriately + gradients copy export for weights

* Changes

* Adding iterator tests

* Fixing iterator test

* Fixing issues

* Fixing naming

* Minor changes

* Adding licenses

* Minor fixes

* Adding default values in Tensor

* Updating tensor impl.

* Updating style

* Fixing Relu and adding test

* Some minor changes to convolution 1d

* Adding enhanced tensor iterator

* trivial style changes

* update conv1d test

* bugfixes to convolution tests

* Updating style

* Fixing cast error

* Restarting jenkins

* Addressing comments

* Merging develop and updating

* Fixing slice

* Making embeddings test pass

* Fixing some style issues

* Fixing some style issues

* fix

* Updating

* bugfix relu backward
  • Loading branch information
troelsfr authored May 14, 2019
1 parent 5699ed5 commit e3f5b83
Show file tree
Hide file tree
Showing 72 changed files with 7,921 additions and 790 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ CMakeCache.txt

nodes/
.ipynb_checkpoints/
docker-images/

# Legacy Editors
\#*#
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ features. Fetch will be delivering regular updates.
3. [Community Website](https://community.fetch.ai/)
4. [Community Telegram Group](https://t.me/fetchai)
5. [Whitepapers](https://fetch.ai/publications.html)
6. [Roadmap](https://fetch.ai/#/roadmap)
6. [Roadmap](https://fetch.ai/#/roadmap)


## Supported platforms
Expand Down
2 changes: 1 addition & 1 deletion libs/math/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ setup_compiler()
#-------------------------------------------------------------------------------

setup_library(fetch-math)
target_link_libraries(fetch-math INTERFACE fetch-core)
target_link_libraries(fetch-math PUBLIC fetch-core)

add_test_target()

Expand Down
16 changes: 10 additions & 6 deletions libs/math/benchmark/basic_math/exp_bench.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,14 +29,16 @@ static void BM_ApproxExpImplementation(benchmark::State &state)
{

fetch::math::ApproxExpImplementation<N, C> fexp;
double x = (double)state.range(0);
double result;
double x = 0.1; //(double)state.range(0);
double result = 0;
for (auto _ : state)
{
// Single iteration is too small to get accurate benchmarks.
x += 0.1;
for (int i = 0; i < 1000; i++)
{
benchmark::DoNotOptimize(result = fexp(x));
x += 0.0001;
result += fexp(x);
}
}
}
Expand All @@ -46,14 +48,16 @@ BENCHMARK_TEMPLATE(BM_ApproxExpImplementation, 12, 60801)->RangeMultiplier(10)->

// Baseline benchmark: measures std::exp for comparison against the
// ApproxExpImplementation benchmarks above.
//
// The input x is advanced on every call so the compiler cannot hoist the
// computation out of the loop, and results are accumulated into `result`
// (and fed to DoNotOptimize) so the calls remain observable and are not
// dead-code-eliminated.
static void BM_exp(benchmark::State &state)
{
  double x      = 0.1;  // fixed start value; state.range(0) intentionally unused
  double result = 0.0;
  for (auto _ : state)
  {
    // Single iteration is too small to get accurate benchmarks.
    x += 0.1;
    for (int i = 0; i < 1000; i++)
    {
      x += 0.0001;
      result += exp(x);
    }
  }
  // Keep the accumulated value live so the optimiser cannot delete the loop.
  benchmark::DoNotOptimize(result);
}
Expand Down
41 changes: 41 additions & 0 deletions libs/math/include/math/linalg/blas/base.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

#include "vectorise/memory/shared_array.hpp"
#include "vectorise/platform.hpp"

namespace fetch {
namespace math {

template <typename T, typename C>
class Tensor;

namespace linalg {
/// Primary template for BLAS-style kernels operating on Tensor<T>.
///
/// @tparam T  scalar element type
/// @tparam S  encoded call signature of the routine (see prototype.hpp)
/// @tparam I  encoded computation the routine performs
/// @tparam V  parallelisation strategy; defaults to vectorised + threaded
template <typename T, uint64_t S, uint64_t I,
uint64_t V = platform::Parallelisation::VECTORISE | platform::Parallelisation::THREADING>
class Blas
{
public:
// The primary template is deliberately not callable: only explicit
// specialisations (selected by S, I and V) provide implementations, so
// using an unimplemented combination fails at compile time.
template <typename... Args>
void operator()(Args... args) = delete;
};

} // namespace linalg
} // namespace math
} // namespace fetch
56 changes: 56 additions & 0 deletions libs/math/include/math/linalg/blas/gemm_nn_novector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

/* The class defined in this file implements the equivalent of
* following Python code:
*
* import numpy as np
* import copy
*
* def gemm_nn_novector(alpha, A, B, beta, C):
* C = alpha * np.dot(A, B) + beta * C
*
* return C
*
* Authors:
*/

#include "math/linalg/blas/base.hpp"
#include "math/linalg/prototype.hpp"
#include "math/tensor.hpp"

namespace fetch {
namespace math {
namespace linalg {

/// GEMM specialisation (no transpose on A or B), serial implementation:
/// computes C = alpha * A * B + beta * C, as described in the Python
/// reference in this file's header comment.
template <typename S>
class Blas<S, Signature(_C <= _alpha, _A, _B, _beta, _C),
Computes(_C <= _alpha * _A * _B + _beta * _C), platform::Parallelisation::NOT_PARALLEL>
{
public:
using Type               = S;  // scalar element type
using VectorRegisterType = typename Tensor<Type>::VectorRegisterType;

/// @param alpha scalar multiplier applied to A * B
/// @param a     left-hand matrix A (read-only)
/// @param b     right-hand matrix B (read-only)
/// @param beta  scalar multiplier applied to the existing contents of C
/// @param c     result matrix C, updated in place
void operator()(Type const &alpha, Tensor<Type> const &a, Tensor<Type> const &b, Type const &beta,
Tensor<Type> &c) const;
};

} // namespace linalg
} // namespace math
} // namespace fetch
56 changes: 56 additions & 0 deletions libs/math/include/math/linalg/blas/gemm_nn_vector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

/* The class defined in this file implements the equivalent of
* following Python code:
*
* import numpy as np
* import copy
*
* def gemm_nn_vector(alpha, A, B, beta, C):
* C = alpha * np.dot(A, B) + beta * C
*
* return C
*
* Authors:
*/

#include "math/linalg/blas/base.hpp"
#include "math/linalg/prototype.hpp"
#include "math/tensor.hpp"

namespace fetch {
namespace math {
namespace linalg {

/// GEMM specialisation (no transpose on A or B), vectorised implementation:
/// computes C = alpha * A * B + beta * C, as described in the Python
/// reference in this file's header comment.
template <typename S>
class Blas<S, Signature(_C <= _alpha, _A, _B, _beta, _C),
Computes(_C <= _alpha * _A * _B + _beta * _C), platform::Parallelisation::VECTORISE>
{
public:
using Type               = S;  // scalar element type
using VectorRegisterType = typename Tensor<Type>::VectorRegisterType;

/// @param alpha scalar multiplier applied to A * B
/// @param a     left-hand matrix A (read-only)
/// @param b     right-hand matrix B (read-only)
/// @param beta  scalar multiplier applied to the existing contents of C
/// @param c     result matrix C, updated in place
void operator()(Type const &alpha, Tensor<Type> const &a, Tensor<Type> const &b, Type const &beta,
Tensor<Type> &c) const;
};

} // namespace linalg
} // namespace math
} // namespace fetch
57 changes: 57 additions & 0 deletions libs/math/include/math/linalg/blas/gemm_nt_novector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

/* The class defined in this file implements the equivalent of
* following Python code:
*
* import numpy as np
* import copy
*
* def gemm_nt_novector(alpha, A, B, beta, C):
* C = alpha * np.dot(A, B.T) + beta * C
*
* return C
*
* Authors:
*/

#include "math/linalg/blas/base.hpp"
#include "math/linalg/prototype.hpp"
#include "math/tensor.hpp"

namespace fetch {
namespace math {
namespace linalg {

/// GEMM specialisation with B transposed (T(_B)), serial implementation:
/// computes C = alpha * A * B^T + beta * C, as described in the Python
/// reference in this file's header comment.
template <typename S>
class Blas<S, Signature(_C <= _alpha, _A, _B, _beta, _C),
Computes(_C <= _alpha * _A * T(_B) + _beta * _C),
platform::Parallelisation::NOT_PARALLEL>
{
public:
using Type               = S;  // scalar element type
using VectorRegisterType = typename Tensor<Type>::VectorRegisterType;

/// @param alpha scalar multiplier applied to A * B^T
/// @param a     left-hand matrix A (read-only)
/// @param b     right-hand matrix B, used transposed (read-only)
/// @param beta  scalar multiplier applied to the existing contents of C
/// @param c     result matrix C, updated in place
void operator()(Type const &alpha, Tensor<Type> const &a, Tensor<Type> const &b, Type const &beta,
Tensor<Type> &c) const;
};

} // namespace linalg
} // namespace math
} // namespace fetch
56 changes: 56 additions & 0 deletions libs/math/include/math/linalg/blas/gemm_nt_vector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

/* The class defined in this file implements the equivalent of
* following Python code:
*
* import numpy as np
* import copy
*
* def gemm_nt_vector(alpha, A, B, beta, C):
* C = alpha * np.dot(A, B.T) + beta * C
*
* return C
*
* Authors:
*/

#include "math/linalg/blas/base.hpp"
#include "math/linalg/prototype.hpp"
#include "math/tensor.hpp"

namespace fetch {
namespace math {
namespace linalg {

/// GEMM specialisation with B transposed (T(_B)), vectorised implementation:
/// computes C = alpha * A * B^T + beta * C, as described in the Python
/// reference in this file's header comment.
template <typename S>
class Blas<S, Signature(_C <= _alpha, _A, _B, _beta, _C),
Computes(_C <= _alpha * _A * T(_B) + _beta * _C), platform::Parallelisation::VECTORISE>
{
public:
using Type               = S;  // scalar element type
using VectorRegisterType = typename Tensor<Type>::VectorRegisterType;

/// @param alpha scalar multiplier applied to A * B^T
/// @param a     left-hand matrix A (read-only)
/// @param b     right-hand matrix B, used transposed (read-only)
/// @param beta  scalar multiplier applied to the existing contents of C
/// @param c     result matrix C, updated in place
void operator()(Type const &alpha, Tensor<Type> const &a, Tensor<Type> const &b, Type const &beta,
Tensor<Type> &c) const;
};

} // namespace linalg
} // namespace math
} // namespace fetch
57 changes: 57 additions & 0 deletions libs/math/include/math/linalg/blas/gemm_tn_novector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
#pragma once
//------------------------------------------------------------------------------
//
// Copyright 2018-2019 Fetch.AI Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

/* The class defined in this file implements the equivalent of
* following Python code:
*
* import numpy as np
* import copy
*
* def gemm_tn_novector(alpha, A, B, beta, C):
* C = alpha * np.dot(A.T, B) + beta * C
*
* return C
*
* Authors:
*/

#include "math/linalg/blas/base.hpp"
#include "math/linalg/prototype.hpp"
#include "math/tensor.hpp"

namespace fetch {
namespace math {
namespace linalg {

/// GEMM specialisation with A transposed (T(_A)), serial implementation:
/// computes C = alpha * A^T * B + beta * C, as described in the Python
/// reference in this file's header comment.
template <typename S>
class Blas<S, Signature(_C <= _alpha, _A, _B, _beta, _C),
Computes(_C <= _alpha * T(_A) * _B + _beta * _C),
platform::Parallelisation::NOT_PARALLEL>
{
public:
using Type               = S;  // scalar element type
using VectorRegisterType = typename Tensor<Type>::VectorRegisterType;

/// @param alpha scalar multiplier applied to A^T * B
/// @param a     left-hand matrix A, used transposed (read-only)
/// @param b     right-hand matrix B (read-only)
/// @param beta  scalar multiplier applied to the existing contents of C
/// @param c     result matrix C, updated in place
void operator()(Type const &alpha, Tensor<Type> const &a, Tensor<Type> const &b, Type const &beta,
Tensor<Type> &c) const;
};

} // namespace linalg
} // namespace math
} // namespace fetch
Loading

0 comments on commit e3f5b83

Please sign in to comment.