
Commit

Merge branch 'master' into tokenizer-fix-decode
apaniukov authored Nov 20, 2023
2 parents 14f993b + 13b3d75 commit cae3098
Showing 34 changed files with 433 additions and 424 deletions.
@@ -11,7 +11,7 @@
#include "cuda_test_constants.hpp"

using namespace LayerTestsDefinitions;
-using namespace ngraph::helpers;
+using ov::test::utils::ActivationTypes;
namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
@@ -27,40 +27,40 @@ const std::vector<InferenceEngine::Precision> intPrecisions = {
// TODO: the commented-out tests don't work for CUDA now.
// The reason is that the corresponding operations or transformations are missing.
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
-{Sigmoid, {}},
-{Tanh, {}},
-{Relu, {}},
-{Exp, {}},
-{Log, {}},
-// {Sign, {}},
-{Abs, {}},
-{Clamp, {{-2.0f, 2.0f}}},
-{Negative, {}},
-// {Acos, {}},
-// {Asin, {}},
-// {Atan, {}},
-{Cos, {}},
-{Cosh, {}},
-{Floor, {}},
-{Sin, {}},
-{Sinh, {}},
-{Sqrt, {}},
-// {Tan, {}},
-{Elu, {{0.1f}}},
-// {Erf, {}},
-// {HardSigmoid, {{0.2f, 0.5f}}},
-// {Selu, {{1.6732f, 1.0507f}}},
-// {Ceiling, {}},
-{Mish, {}},
-{Swish, {{0.5f}}},
-{HSwish, {}},
-// {SoftPlus, {}},
-{HSigmoid, {}},
-// {RoundHalfToEven, {}},
-// {RoundHalfAwayFromZero, {}},
-{Gelu, {}},
-{GeluErf, {}},
-{GeluTanh, {}}};
+{ActivationTypes::Sigmoid, {}},
+{ActivationTypes::Tanh, {}},
+{ActivationTypes::Relu, {}},
+{ActivationTypes::Exp, {}},
+{ActivationTypes::Log, {}},
+// {ActivationTypes::Sign, {}},
+{ActivationTypes::Abs, {}},
+{ActivationTypes::Clamp, {{-2.0f, 2.0f}}},
+{ActivationTypes::Negative, {}},
+// {ActivationTypes::Acos, {}},
+// {ActivationTypes::Asin, {}},
+// {ActivationTypes::Atan, {}},
+{ActivationTypes::Cos, {}},
+{ActivationTypes::Cosh, {}},
+{ActivationTypes::Floor, {}},
+{ActivationTypes::Sin, {}},
+{ActivationTypes::Sinh, {}},
+{ActivationTypes::Sqrt, {}},
+// {ActivationTypes::Tan, {}},
+{ActivationTypes::Elu, {{0.1f}}},
+// {ActivationTypes::Erf, {}},
+// {ActivationTypes::HardSigmoid, {{0.2f, 0.5f}}},
+// {ActivationTypes::Selu, {{1.6732f, 1.0507f}}},
+// {ActivationTypes::Ceiling, {}},
+{ActivationTypes::Mish, {}},
+{ActivationTypes::Swish, {{0.5f}}},
+{ActivationTypes::HSwish, {}},
+// {ActivationTypes::SoftPlus, {}},
+{ActivationTypes::HSigmoid, {}},
+// {ActivationTypes::RoundHalfToEven, {}},
+// {ActivationTypes::RoundHalfAwayFromZero, {}},
+{ActivationTypes::Gelu, {}},
+{ActivationTypes::GeluErf, {}},
+{ActivationTypes::GeluTanh, {}}};

class CUDAActivationIntegerLayerTest : public ActivationLayerTest {
void SetUp() override {
@@ -71,18 +71,18 @@ class CUDAActivationIntegerLayerTest : public ActivationLayerTest {

// List of operations that should also be tested with integer precision
const std::map<ActivationTypes, std::vector<std::vector<float>>> intActivationTypes = {
-{Abs, {}},
-{Negative, {}},
-{Cos, {}},
-{Cosh, {}},
-{Sinh, {}},
-{Sqrt, {}},
-{Log, {}},
+{ActivationTypes::Abs, {}},
+{ActivationTypes::Negative, {}},
+{ActivationTypes::Cos, {}},
+{ActivationTypes::Cosh, {}},
+{ActivationTypes::Sinh, {}},
+{ActivationTypes::Sqrt, {}},
+{ActivationTypes::Log, {}},
};

const std::map<ActivationTypes, std::vector<std::vector<float>>> preluActivationParamTypes = {
-{PReLu, {{}}}, // Slope will be filled with increasing values from -10 to match slope input shape
-{LeakyRelu, {{0.01f}}}};
+{ActivationTypes::PReLu, {{}}}, // Slope will be filled with increasing values from -10 to match slope input shape
+{ActivationTypes::LeakyRelu, {{0.01f}}}};

std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic = {
{{1, 50}, {{}}},

Large diffs are not rendered by default.

@@ -12,7 +12,6 @@
namespace {
using namespace ov::test;
using namespace ov::test::utils;
-using namespace ngraph::helpers;

class UnsymmetricalComparisonLayerTest : public UnsymmetricalComparer<ComparisonLayerTest> {};

@@ -158,10 +158,8 @@ class ConvolutionBackpropDataExtendedLayerTest

auto outputShapeNode = std::make_shared<ov::op::v0::Constant>(
ov::element::Type_t::i64, ov::Shape{outputShapeData.size()}, outputShapeData);
-auto paramOuts =
-ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(params));
auto convBackpropData = std::dynamic_pointer_cast<ngraph::opset1::ConvolutionBackpropData>(
-makeConvolutionBackpropData(paramOuts[0],
+makeConvolutionBackpropData(params[0],
outputShapeNode,
ngPrc,
kernel,
@@ -156,10 +156,8 @@ class ConvolutionBackpropDataAddExtendedLayerTest

auto outputShapeNode = std::make_shared<ov::op::v0::Constant>(
ov::element::Type_t::i64, ov::Shape{outputShapeData.size()}, outputShapeData);
-auto paramOuts =
-ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(params));
auto convBackpropData = std::dynamic_pointer_cast<ngraph::opset1::ConvolutionBackpropData>(
-makeConvolutionBackpropData(paramOuts[0],
+makeConvolutionBackpropData(params[0],
outputShapeNode,
ngPrc,
kernel,
@@ -21,6 +21,7 @@
#include "average_finder.hpp"

namespace LayerTestsDefinitions {
+using ov::test::utils::ActivationTypes;

constexpr uint32_t RANGE = 10;
constexpr int32_t START_FROM = -5;
@@ -85,9 +86,9 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};

-const std::vector<ngraph::helpers::ActivationTypes> netActivations = {
-ngraph::helpers::ActivationTypes::None,
-ngraph::helpers::ActivationTypes::Relu,
+const std::vector<ActivationTypes> netActivations = {
+ActivationTypes::None,
+ActivationTypes::Relu,
};

/* ============= 2D Convolution ============= */
@@ -225,7 +226,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(InferenceEngine::Layout::ANY), // Output layout
::testing::Values(std::vector<size_t>({1, 88, 10, 10})), // Input shape
::testing::Values(ov::test::utils::DEVICE_NVIDIA)),
-::testing::Values(ngraph::helpers::ActivationTypes::None)),
+::testing::Values(ActivationTypes::None)),
ConvolutionBiasAddActivationThresholdLayerTest::getTestCaseName);

/* ============= resnet50/vgg16 Convolutions ============= */
@@ -31,16 +31,17 @@
#include <vector>

namespace LayerTestsDefinitions {
+using ov::test::utils::ActivationTypes;

// TODO: Consider adding the bias shape here too, instead of deriving it in the test class.
// That would allow the test generator to use the bias shape from the model
typedef std::tuple<convLayerTestParamsSet,
-ngraph::helpers::ActivationTypes // Activation
+ActivationTypes // Activation
>
convBAATestParamSet;

typedef std::tuple<groupConvLayerTestParamsSet,
-ngraph::helpers::ActivationTypes // Activation
+ActivationTypes // Activation
>
groupConvBAATestParamSet;

@@ -89,13 +90,13 @@ class BasicConvolutionBiasAddActivationLayerTest

static std::string getTestCaseName(testing::TestParamInfo<typename Traits::ConvBAAParamSet> obj) {
typename Traits::ConvParamSet convParamSet;
-ngraph::helpers::ActivationTypes activation;
+ActivationTypes activation;
std::tie(convParamSet, activation) = obj.param;

std::ostringstream result;
result << TConvLayerTest::getTestCaseName({convParamSet, obj.index}) << "_";
result << "Activation="
-<< (activation == ngraph::helpers::ActivationTypes::None
+<< (activation == ActivationTypes::None
? "None"
: LayerTestsDefinitions::activationNames[activation]);
return result.str();
@@ -104,7 +105,7 @@ class BasicConvolutionBiasAddActivationLayerTest
protected:
void SetUp() override {
typename Traits::ConvParamSet convParamSet;
-ngraph::helpers::ActivationTypes activation;
+ActivationTypes activation;
std::tie(convParamSet, activation) = this->GetParam();

ov::element::Type ngNetPrc = ov::element::Type_t::undefined;
@@ -133,7 +134,7 @@ class BasicConvolutionBiasAddActivationLayerTest
} else {
lastNode = biasAddLayer;
}
-if (activation != ngraph::helpers::ActivationTypes::None) {
+if (activation != ActivationTypes::None) {
lastNode = ngraph::builder::makeActivation(lastNode, ngNetPrc, activation);
}

@@ -164,14 +165,11 @@ class BasicConvolutionBiasAddActivationLayerTest

auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};

-auto paramOuts =
-ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ov::op::v0::Parameter>(params));
std::vector<float> filter_weights;

std::shared_ptr<ov::Node> convNode = nullptr;
if constexpr (!isGroup) {
-convNode = ngraph::builder::makeConvolution(paramOuts[0],
+convNode = ngraph::builder::makeConvolution(params[0],
ngPrc,
kernel,
stride,
@@ -183,7 +181,7 @@ class BasicConvolutionBiasAddActivationLayerTest
false,
filter_weights);
} else {
-convNode = ngraph::builder::makeGroupConvolution(paramOuts[0],
+convNode = ngraph::builder::makeGroupConvolution(params[0],
ngPrc,
kernel,
stride,
@@ -28,6 +28,8 @@
namespace LayerTestsDefinitions {

namespace {
+using ov::test::utils::EltwiseTypes;
+using ov::test::utils::InputLayerType;

template <InferenceEngine::Precision::ePrecision PRC>
void replace(InferenceEngine::Blob::Ptr& blob, float old_value, float new_value, bool is_integer) {
@@ -166,18 +168,18 @@ InferenceEngine::Blob::Ptr CudaEltwiseLayerTest::GenerateInput(const InferenceEn
const auto precision = info.getPrecision();
const auto is_float = precision.is_float();
switch (op_type) {
-case ngraph::helpers::EltwiseTypes::POWER:
+case EltwiseTypes::POWER:
return is_float ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128)
: FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 4, 2);
-case ngraph::helpers::EltwiseTypes::DIVIDE:
-case ngraph::helpers::EltwiseTypes::MOD: {
+case EltwiseTypes::DIVIDE:
+case EltwiseTypes::MOD: {
auto blob = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), range, start_from, resolution, seed);
if (!is_float && info.name() == secondary_input_name) {
replace(blob, precision, 0, 1, true);
}
return blob;
}
-case ngraph::helpers::EltwiseTypes::ERF:
+case EltwiseTypes::ERF:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 6, -3);
default:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), range, start_from, resolution, seed);
@@ -193,9 +195,9 @@ void CudaEltwiseLayerTest::SetUp() {
ov::test::ElementType netType;
ov::test::ElementType in_prc;
ov::test::ElementType out_prc;
-ngraph::helpers::InputLayerType secondaryInputType;
+InputLayerType secondaryInputType;
ov::test::utils::OpType opType;
-ngraph::helpers::EltwiseTypes eltwiseType;
+EltwiseTypes eltwiseType;
ov::AnyMap additionalConfig;
const ov::test::subgraph::EltwiseTestParams ew_params = std::get<0>(this->GetParam());
const OperationMode mode = std::get<1>(this->GetParam());
@@ -223,12 +225,12 @@ void CudaEltwiseLayerTest::SetUp() {
FAIL() << "Unsupported Secondary operation type";
}
// To propagate shape_input_secondary only in the static case, because all shapes are defined in the dynamic scenario
-if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
+if (secondaryInputType == InputLayerType::PARAMETER) {
transformInputShapesAccordingEltwise(shape_input_secondary);
}

std::shared_ptr<ngraph::Node> secondaryInput;
-if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
+if (secondaryInputType == InputLayerType::PARAMETER) {
auto input = std::make_shared<ov::op::v0::Parameter>(netType, shape_input_secondary);
secondaryInput = input;
parameters.push_back(input);
@@ -238,8 +240,8 @@ void CudaEltwiseLayerTest::SetUp() {
auto data = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(
ngraph::shape_size(shape), up_to, start_from, seed);
switch (eltwiseType) {
-case ngraph::helpers::EltwiseTypes::DIVIDE:
-case ngraph::helpers::EltwiseTypes::MOD: {
+case EltwiseTypes::DIVIDE:
+case EltwiseTypes::MOD: {
if (ov::element::Type{netType}.is_integral()) {
std::replace_if(
data.begin(),
@@ -250,7 +252,7 @@ void CudaEltwiseLayerTest::SetUp() {
secondaryInput = std::make_shared<ov::op::v0::Constant>(netType, shape, data);
break;
}
-case ngraph::helpers::EltwiseTypes::POWER: {
+case EltwiseTypes::POWER: {
ov::Tensor random_tensor(netType, shape);
ov::test::utils::fill_tensor_random(random_tensor, 3, -3);
secondaryInput = std::make_shared<ov::op::v0::Constant>(random_tensor);
@@ -268,7 +270,7 @@ void CudaEltwiseLayerTest::SetUp() {
secondary_input_name = secondaryInput->get_friendly_name();

const bool is_python_divide = mode == OperationMode::PYTHON_DIVIDE;
-auto eltwise = eltwiseType == ngraph::helpers::EltwiseTypes::DIVIDE
+auto eltwise = eltwiseType == EltwiseTypes::DIVIDE
? std::make_shared<ngraph::op::v1::Divide>(parameters[0], secondaryInput, is_python_divide)
: ngraph::builder::makeEltwise(parameters[0], secondaryInput, eltwiseType);
function = std::make_shared<ngraph::Function>(eltwise, parameters, "Eltwise");
