#12758: Update files
mouliraj-mcw committed Sep 19, 2024
1 parent 7adf026 commit 8165bdf
Showing 6 changed files with 61 additions and 29 deletions.
29 changes: 28 additions & 1 deletion tests/ttnn/unit_tests/operations/test_binary_composite.py
```diff
@@ -295,7 +295,7 @@ def test_binary_div_ttnn_opt(accurate_mode, round_mode, input_shapes, device):
     ),
 )
 @pytest.mark.parametrize("value", [-5.1, 0.0, 10.9])
-def test_binary_div_overload_ttnn(accurate_mode, round_mode, input_shapes, value, device):
+def test_binary_div_scalar_ttnn(accurate_mode, round_mode, input_shapes, value, device):
     if is_grayskull():
         if round_mode in ["trunc", "floor"]:
             pytest.skip("does not work for Grayskull -skipping")
@@ -309,6 +309,33 @@ def test_binary_div_overload_ttnn(accurate_mode, round_mode, input_shapes, value
     assert comp_pass


+@pytest.mark.parametrize("accurate_mode", [False, True])
+@pytest.mark.parametrize("round_mode", ["None", "trunc", "floor"])
+@pytest.mark.parametrize(
+    "input_shapes",
+    (
+        (torch.Size([1, 1, 32, 32])),
+        (torch.Size([1, 1, 320, 384])),
+        (torch.Size([1, 3, 320, 384])),
+    ),
+)
+@pytest.mark.parametrize("value", [-5.1, 0.0, 10.9])
+def test_binary_div_scalar_ttnn_opt(accurate_mode, round_mode, input_shapes, value, device):
+    if is_grayskull():
+        if round_mode in ["trunc", "floor"]:
+            pytest.skip("does not work for Grayskull -skipping")
+    in_data1, input_tensor1 = data_gen_with_range(input_shapes, -100, 100, device)
+    _, output_tensor = data_gen_with_range(input_shapes, -1, 1, device)
+
+    cq_id = 0
+    ttnn.div(input_tensor1, value, accurate_mode=accurate_mode, round_mode=round_mode, output_tensor=output_tensor)
+    golden_function = ttnn.get_golden_function(ttnn.div)
+    golden_tensor = golden_function(in_data1, value, round_mode)
+
+    comp_pass = compare_pcc([output_tensor], [golden_tensor])
+    assert comp_pass
+
+
 @pytest.mark.parametrize(
     "input_shapes",
     (
```
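The new `test_binary_div_scalar_ttnn_opt` case exercises the preallocated-output path: `output_tensor` is created up front and `ttnn.div` writes into it rather than allocating a result. For reference, the golden comparison plausibly reduces to `torch.div` with the matching rounding mode — a sketch under that assumption (`golden_div_scalar` is a hypothetical name, not part of ttnn):

```python
import torch

# Hypothetical torch-level equivalent of the registered golden function for
# scalar division; assumes it mirrors torch.div's rounding_mode semantics.
def golden_div_scalar(a: torch.Tensor, value: float, round_mode: str = "None") -> torch.Tensor:
    # ttnn spells "no rounding" as the string "None"; torch uses Python None.
    rounding = None if round_mode == "None" else round_mode
    return torch.div(a, value, rounding_mode=rounding)
```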
6 changes: 4 additions & 2 deletions ttnn/cpp/ttnn/operations/eltwise/binary/binary_composite.hpp
```diff
@@ -79,14 +79,16 @@ struct ExecuteDiv
         const Tensor& input_tensor_b,
         bool accurate_mode = false,
         const std::string& round_mode = "None",
-        const std::optional<MemoryConfig>& memory_config = std::nullopt);
+        const std::optional<MemoryConfig>& memory_config = std::nullopt,
+        std::optional<Tensor> optional_output_tensor = std::nullopt);

     static Tensor invoke(
         const Tensor& input_tensor,
         float value,
         bool accurate_mode = false,
         const std::string& round_mode = "None",
-        const std::optional<MemoryConfig>& memory_config = std::nullopt);
+        const std::optional<MemoryConfig>& memory_config = std::nullopt,
+        std::optional<Tensor> optional_output_tensor = std::nullopt);

     static Tensor invoke(
         uint8_t queue_id,
```
1 change: 0 additions & 1 deletion ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp
```diff
@@ -351,7 +351,6 @@ void bind_div(py::module& module, const binary_operation_t& operation, const std
             * :attr:`input_tensor_a`
             * :attr:`input_tensor_b` (ttnn.Tensor or Number)
             * :attr:`accurate_mode`: ``false`` if input_tensor_b is non-zero, else ``true``.
-            * :attr:`round_mode` (Default: None)

         Keyword Args:
             * :attr:`accurate_mode`: ``false`` if input_tensor_b is non-zero, else ``true`` (Only if the input tensor is not ComplexTensor)
```
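Per the surviving docstring, `round_mode` and `accurate_mode` are keyword arguments. A minimal usage sketch, assuming a device is already open and `input_tensor_a` / `input_tensor_b` were created with `ttnn.from_torch` (the variable names here are illustrative):

```python
# accurate_mode=True is needed whenever the divisor may contain zeros;
# round_mode accepts "None", "trunc", or "floor".
result = ttnn.div(input_tensor_a, input_tensor_b, accurate_mode=True, round_mode="floor")
scaled = ttnn.div(input_tensor_a, 2.5, round_mode="trunc")
```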
```diff
@@ -171,46 +171,48 @@ Tensor ExecuteDiv::invoke(uint8_t queue_id, const Tensor& input, float value, bo
     return output_tensor.value();
 }

-Tensor ExecuteDiv::invoke(const Tensor& input, float value, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config) {
-    return ExecuteDiv::invoke(DefaultQueueId, input, value, accurate_mode, round_mode, output_mem_config);
+Tensor ExecuteDiv::invoke(const Tensor& input, float value, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
+    return ExecuteDiv::invoke(DefaultQueueId, input, value, accurate_mode, round_mode, output_mem_config, output_tensor);
 }

 Tensor ExecuteDiv::invoke(uint8_t queue_id, const Tensor& input_a, const Tensor& input_b, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
     TT_FATAL((round_mode == "None" || round_mode == "trunc" || round_mode == "floor"), "Incorrect rounding mode (expected 'None', 'trunc', or 'floor')");
-    output_tensor = output_tensor.value_or(ttnn::zeros_like(input_a));
+    output_tensor = output_tensor.value_or(ttnn::empty_like(input_a));
     auto arch = input_a.device()->arch();
     if (arch == tt::ARCH::WORMHOLE_B0) {
         DataType input_dtype = input_a.get_dtype();
-        Tensor a = typecast(input_a, DataType::FLOAT32);
-        Tensor b = typecast(input_b, DataType::FLOAT32);
-        ttnn::divide(queue_id, a, b, std::nullopt, std::nullopt, output_tensor);
+        Tensor a = typecast(queue_id, input_a, DataType::FLOAT32);
+        Tensor b = typecast(queue_id, input_b, DataType::FLOAT32);
+        Tensor result = ttnn::divide(queue_id, a, b);

         if(round_mode == "trunc"){
-            ttnn::trunc(queue_id, output_tensor.value(), output_mem_config, output_tensor);
+            result = ttnn::trunc(queue_id, result);
         }
         else if(round_mode == "floor"){
-            ttnn::floor(queue_id, output_tensor.value(), output_mem_config, output_tensor);
+            result = ttnn::floor(queue_id, result);
         }

         if (accurate_mode == false) {  // If input_b is non-zero tensor
-            return typecast(queue_id, output_tensor.value(), input_dtype, output_mem_config, output_tensor);
+            return typecast(queue_id, result, input_dtype, std::nullopt, output_tensor);
         }

-        Tensor t_inf = ttnn::full_like(input_a, std::numeric_limits<float>::infinity());
-        Tensor t_nan = ttnn::full_like(input_a, std::nanf(""));
-        return typecast(queue_id, where(
+        float t_nan = std::nanf("");
+        float t_inf = std::numeric_limits<float>::infinity();
+        typecast(queue_id, where(
             queue_id,
             ttnn::eqz(queue_id, input_b, output_mem_config),
             ttnn::where(
                 queue_id,
                 ttnn::eqz(queue_id, input_a, output_mem_config),
                 t_nan,
-                ttnn::multiply(queue_id, t_inf, ttnn::sign(input_a, output_mem_config), std::nullopt, output_mem_config)),
-            output_tensor.value()),
+                ttnn::multiply(queue_id, ttnn::sign(queue_id, input_a, output_mem_config), t_inf, std::nullopt, output_mem_config)),
+            result),
             input_dtype,
-            output_mem_config,
+            std::nullopt,
             output_tensor);
-    } else {
+        return output_tensor.value();
+    }
+    else {
         ttnn::divide(queue_id, input_a, input_b, std::nullopt, std::nullopt, output_tensor);

         if(round_mode == "trunc"){
@@ -224,24 +224,24 @@ Tensor ExecuteDiv::invoke(uint8_t queue_id, const Tensor& input_a, const Tensor&
             return output_tensor.value();
         }

-        Tensor t_inf = ttnn::full_like(queue_id, input_a, std::numeric_limits<float>::infinity());
-        Tensor t_nan = ttnn::full_like(queue_id, input_a, std::nanf(""));
+        float t_nan = std::nanf("");
+        float t_inf = std::numeric_limits<float>::infinity();
         return ttnn::where(
             queue_id,
             ttnn::eqz(queue_id, input_b, output_mem_config),
             ttnn::where(
                 queue_id,
                 ttnn::eqz(queue_id, input_a, output_mem_config),
                 t_nan,
-                ttnn::multiply(queue_id, t_inf, ttnn::sign(input_a, output_mem_config), std::nullopt, output_mem_config)),
+                ttnn::multiply(queue_id, ttnn::sign(input_a, output_mem_config), t_inf, std::nullopt, output_mem_config)),
             output_tensor.value(),
             output_mem_config,
             output_tensor);
     }
 }

-Tensor ExecuteDiv::invoke(const Tensor& input_a, const Tensor& input_b, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config) {
-    return ExecuteDiv::invoke(DefaultQueueId, input_a, input_b, accurate_mode, round_mode, output_mem_config);
+Tensor ExecuteDiv::invoke(const Tensor& input_a, const Tensor& input_b, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
+    return ExecuteDiv::invoke(DefaultQueueId, input_a, input_b, accurate_mode, round_mode, output_mem_config, output_tensor);
 }

 Tensor _div_no_nan_overload(const Tensor& input_a, float value, const std::optional<MemoryConfig>& output_mem_config) {
```
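The `accurate_mode` branch above composes `eqz`, `sign`, and nested `where` calls so that division by zero yields NaN for 0/0 and signed infinity otherwise; the commit also swaps the full `t_inf`/`t_nan` tensors for float scalars. A torch-level sketch of the same semantics (`div_accurate` is a hypothetical helper, not the ttnn implementation):

```python
import torch

def div_accurate(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    quotient = a / b
    # Where b == 0: 0/0 -> nan, x/0 -> +/-inf carrying the sign of x.
    zero_div = torch.where(a == 0, torch.full_like(a, float("nan")),
                           torch.sign(a) * float("inf"))
    return torch.where(b == 0, zero_div, quotient)
```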
```diff
@@ -367,15 +367,16 @@ Tensor _swish(const Tensor& a, const std::optional<MemoryConfig>& output_mem_con

 Tensor ExecuteTrunc::invoke(uint8_t queue_id, const Tensor& input, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
     auto arch = input.device()->arch();
+    output_tensor = output_tensor.value_or(ttnn::empty_like(input));
     TT_FATAL(arch != tt::ARCH::GRAYSKULL, "Op is not supported on Grayskull");
-    Tensor floor_res = ttnn::floor(queue_id, input, output_mem_config, output_tensor);
+    Tensor floor_res = ttnn::floor(queue_id, input, output_mem_config);
     ttnn::where(queue_id, ttnn::ne(queue_id, input, floor_res), ttnn::add(queue_id, floor_res, 1.0f, std::nullopt, output_mem_config), floor_res, output_mem_config, output_tensor);
     ttnn::where(queue_id, ttnn::gtz(queue_id, input, output_mem_config), floor_res, output_tensor.value(), output_mem_config, output_tensor);
     return output_tensor.value();
 }

-Tensor ExecuteTrunc::invoke(const Tensor& input, const std::optional<MemoryConfig>& output_mem_config) {
-    return ExecuteTrunc::invoke(DefaultQueueId, input, output_mem_config);
+Tensor ExecuteTrunc::invoke(const Tensor& input, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
+    return ExecuteTrunc::invoke(DefaultQueueId, input, output_mem_config, output_tensor);
 }

 // Function variance of whole tensor.
```
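`ExecuteTrunc` now allocates its output with `empty_like` when no output tensor is supplied, and keeps `floor_res` as a plain intermediate: it takes the floor, derives the ceiling as `floor_res + 1` wherever the input is non-integral, then selects floor for positive inputs and ceiling otherwise. The same composition in torch terms (`trunc_ref` is a sketch for illustration, not ttnn code):

```python
import torch

def trunc_ref(x: torch.Tensor) -> torch.Tensor:
    floor_res = torch.floor(x)
    ceil_res = torch.where(x != floor_res, floor_res + 1.0, floor_res)  # equals torch.ceil(x)
    return torch.where(x > 0, floor_res, ceil_res)  # truncate toward zero
```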
3 changes: 2 additions & 1 deletion ttnn/cpp/ttnn/operations/eltwise/unary/unary_composite.hpp
```diff
@@ -71,7 +71,8 @@ struct ExecuteTrunc {

     static Tensor invoke(
         const Tensor& input_tensor,
-        const std::optional<MemoryConfig>& memory_config = std::nullopt);
+        const std::optional<MemoryConfig>& memory_config = std::nullopt,
+        std::optional<Tensor> optional_output_tensor = std::nullopt);
 };

 //OpHandler_float : get_function_type_float
```
