Skip to content

Commit

Permalink
Add as_cpu to TensorCPU and TensorListCPU
Browse files Browse the repository at this point in the history
Signed-off-by: Joaquin Anton Guirao <[email protected]>
  • Loading branch information
jantonguirao committed Dec 16, 2024
1 parent f6e2c00 commit 5e67bc7
Show file tree
Hide file tree
Showing 27 changed files with 70 additions and 132 deletions.
9 changes: 9 additions & 0 deletions dali/python/backend_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -718,6 +718,11 @@ void ExposeTensor(py::module &m) {
Returns a `TensorGPU` object being a copy of this `TensorCPU`.
)code",
py::return_value_policy::take_ownership)
.def("as_cpu", [](Tensor<CPUBackend> &t) -> Tensor<CPUBackend>& {
return t;
},
R"code(Bypass, since the object is already an instance of `TensorCPU`.)code",
py::return_value_policy::reference_internal)
.def("copy_to_external",
[](Tensor<CPUBackend> &t, py::object p) {
CopyToExternal<mm::memory_kind::host>(ctypes_void_ptr(p), t, AccessOrder::host(), false);
Expand Down Expand Up @@ -1174,6 +1179,10 @@ void ExposeTensorList(py::module &m) {
Returns a `TensorListGPU` object being a copy of this `TensorListCPU`.
)code",
py::return_value_policy::take_ownership)
.def("as_cpu", [](TensorList<CPUBackend> &t) {
return t;
}, R"code(No-op, as it is already an instance of `TensorListCPU`.)code",
py::return_value_policy::reference_internal)
.def("layout", [](TensorList<CPUBackend> &t) {
return t.GetLayout().str();
})
Expand Down
4 changes: 1 addition & 3 deletions dali/test/python/auto_aug/test_augmentations.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
from PIL import Image, ImageEnhance, ImageOps
from nose2.tools import params, cartesian_params

import nvidia.dali.tensors as _tensors
from nvidia.dali import fn, pipeline_def
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core._utils import get_translations as _get_translations
Expand Down Expand Up @@ -91,8 +90,7 @@ def pipeline():
if dev == "gpu":
output = output.as_cpu()
output = [np.array(sample) for sample in output]
if isinstance(data, _tensors.TensorListGPU):
data = data.as_cpu()
data = data.as_cpu()
data = [np.array(sample) for sample in data]

if modality == "image":
Expand Down
5 changes: 2 additions & 3 deletions dali/test/python/auto_aug/test_rand_augment.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from scipy.stats import chisquare
from nose2.tools import params

from nvidia.dali import fn, tensors, types
from nvidia.dali import fn, types
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import rand_augment
from nvidia.dali.auto_aug.core import augmentation
Expand All @@ -43,8 +43,7 @@ def debug_discrepancy_helper(*batch_pairs):
"""

def as_array_list(batch):
if isinstance(batch, tensors.TensorListGPU):
batch = batch.as_cpu()
batch = batch.as_cpu()
return [np.array(sample) for sample in batch]

batch_names = [name for _, _, name in batch_pairs]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
import glob
import numpy as np
import itertools
import nvidia.dali as dali
from nvidia.dali import fn, pipeline_def, types
from test_utils import (
compare_pipelines,
Expand All @@ -41,9 +40,7 @@


def tensor_list_to_array(tensor_list):
if isinstance(tensor_list, dali.backend_impl.TensorListGPU):
tensor_list = tensor_list.as_cpu()
return tensor_list.as_array()
return tensor_list.as_cpu().as_array()


# Check whether a given pipeline is stateless
Expand Down
7 changes: 2 additions & 5 deletions dali/test/python/decoder/test_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import os
from itertools import cycle
from test_utils import get_dali_extra_path, is_mulit_gpu, skip_if_m60
from nvidia.dali.backend import TensorListGPU
from nose2.tools import params
from nose_utils import SkipTest, attr, assert_raises

Expand Down Expand Up @@ -77,8 +76,7 @@ def video_decoder_iter(batch_size, epochs=1, device="cpu", module=fn.experimenta
pipe.build()
for _ in range(int((epochs * len(files) + batch_size - 1) / batch_size)):
(output,) = pipe.run()
if isinstance(output, TensorListGPU):
output = output.as_cpu()
output = output.as_cpu()
for i in range(batch_size):
yield np.array(output[i])

Expand All @@ -89,8 +87,7 @@ def ref_iter(epochs=1, device="cpu"):
pipe = reference_pipeline(filename, device=device)
pipe.build()
(output,) = pipe.run()
if isinstance(output, TensorListGPU):
output = output.as_cpu()
output = output.as_cpu()
yield np.array(output[0])


Expand Down
15 changes: 4 additions & 11 deletions dali/test/python/operator_1/test_arithmetic_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali.math as math
from nvidia.dali.tensors import TensorListGPU
import numpy as np
from nose_utils import attr, raises, assert_raises, assert_equals
from nose2.tools import params
Expand Down Expand Up @@ -226,12 +225,6 @@ def default_range(*types):
]


def as_cpu(tl):
    """Return a host-side copy of *tl* when it lives on the GPU; otherwise return it as-is."""
    return tl.as_cpu() if isinstance(tl, TensorListGPU) else tl


def max_dtype(kind, left_dtype, right_dtype):
return np.dtype(kind + str(max(left_dtype.itemsize, right_dtype.itemsize)))

Expand Down Expand Up @@ -449,8 +442,8 @@ def get_numpy_input(input, kind, orig_type, target_type):


def extract_un_data(pipe_out, sample_id, kind, target_type):
input = as_cpu(pipe_out[0]).at(sample_id)
out = as_cpu(pipe_out[1]).at(sample_id)
input = pipe_out[0].at(sample_id).as_cpu()
out = pipe_out[1].at(sample_id).as_cpu()
assert_equals(out.dtype, target_type)
in_np = get_numpy_input(input, kind, input.dtype.type, target_type)
return in_np, out
Expand All @@ -465,15 +458,15 @@ def extract_data(pipe_out, sample_id, kinds, target_type):
arity = len(kinds)
inputs = []
for i in range(arity):
dali_in = as_cpu(pipe_out[i]).at(sample_id)
dali_in = pipe_out[i].at(sample_id).as_cpu()
numpy_in = get_numpy_input(
dali_in,
kinds[i],
dali_in.dtype.type,
target_type if target_type is not None else dali_in.dtype.type,
)
inputs.append(numpy_in)
out = as_cpu(pipe_out[arity]).at(sample_id)
out = pipe_out[arity].at(sample_id).as_cpu()
return tuple(inputs) + (out,)


Expand Down
9 changes: 3 additions & 6 deletions dali/test/python/operator_1/test_batch_permute.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import nvidia.dali as dali
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
import numpy as np
Expand Down Expand Up @@ -69,8 +68,7 @@ def _test_permute_batch(device, type):
for i in range(10):
orig, permuted, idxs = pipe.run()
idxs = [int(idxs.at(i)) for i in range(batch_size)]
if isinstance(orig, dali.backend.TensorListGPU):
orig = orig.as_cpu()
orig = orig.as_cpu()
ref = [orig.at(idx) for idx in idxs]
check_batch(permuted, ref, len(ref), 0, 0, "abc")

Expand All @@ -91,10 +89,9 @@ def _test_permute_batch_fixed(device):
pipe.set_outputs(data, fn.permute_batch(data, indices=idxs))
pipe.build()

for i in range(10):
for _ in range(10):
orig, permuted = pipe.run()
if isinstance(orig, dali.backend.TensorListGPU):
orig = orig.as_cpu()
orig = orig.as_cpu()
ref = [orig.at(idx) for idx in idxs]
check_batch(permuted, ref, len(ref), 0, 0, "abc")

Expand Down
5 changes: 2 additions & 3 deletions dali/test/python/operator_1/test_coin_flip.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

import numpy as np
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
from nvidia.dali.pipeline import Pipeline


Expand Down Expand Up @@ -53,10 +52,10 @@ def shape_gen_f():
pipe.set_outputs(*outputs)
pipe.build()
outputs = pipe.run()
data_out = outputs[0].as_cpu() if isinstance(outputs[0], TensorListGPU) else outputs[0]
data_out = outputs[0].as_cpu()
shapes_out = None
if max_shape is not None:
shapes_out = outputs[1].as_cpu() if isinstance(outputs[1], TensorListGPU) else outputs[1]
shapes_out = outputs[1].as_cpu()
p = p if p is not None else 0.5
for i in range(batch_size):
data = np.array(data_out[i])
Expand Down
2 changes: 1 addition & 1 deletion dali/test/python/operator_1/test_coord_flip.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def check_operator_coord_flip(device, batch_size, layout, shape, center_x, cente
for sample in range(batch_size):
in_coords = outputs[0].at(sample)
if device == "gpu":
out_coords = outputs[1].as_cpu().at(sample)
out_coords = outputs[1].at(sample).as_cpu()
else:
out_coords = outputs[1].at(sample)
if in_coords.shape == () or in_coords.shape[0] == 0:
Expand Down
12 changes: 3 additions & 9 deletions dali/test/python/operator_1/test_crop.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
from nvidia.dali import pipeline_def, fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
import os
from nose_utils import assert_raises
Expand Down Expand Up @@ -580,15 +579,10 @@ def check_crop_with_out_of_bounds_policy_support(
if fill_values is None:
fill_values = 0
pipe.build()
for k in range(3):
for _ in range(3):
outs = pipe.run()
out = outs[0]
in_data = outs[1]
if isinstance(out, dali.backend_impl.TensorListGPU):
out = out.as_cpu()
if isinstance(in_data, dali.backend_impl.TensorListGPU):
in_data = in_data.as_cpu()

out = outs[0].as_cpu()
in_data = outs[1].as_cpu()
assert batch_size == len(out)
for idx in range(batch_size):
sample_in = in_data.at(idx)
Expand Down
16 changes: 3 additions & 13 deletions dali/test/python/operator_1/test_crop_mirror_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
# limitations under the License.

import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
Expand Down Expand Up @@ -773,10 +772,8 @@ def check_cmn_with_out_of_bounds_policy_support(
out = outs[0]
in_data = outs[1]
mirror_data = outs[2]
if isinstance(out, dali.backend_impl.TensorListGPU):
out = out.as_cpu()
if isinstance(in_data, dali.backend_impl.TensorListGPU):
in_data = in_data.as_cpu()
out = out.as_cpu()
in_data = in_data.as_cpu()

assert batch_size == len(out)
for idx in range(batch_size):
Expand Down Expand Up @@ -907,14 +904,7 @@ def pipe():
for _ in range(3):
outs = p.run()
for s in range(batch_size):
out, image_like, mean, std = [
(
np.array(o[s].as_cpu())
if isinstance(o, dali.backend_impl.TensorListGPU)
else np.array(o[s])
)
for o in outs
]
out, image_like, mean, std = [np.array(o[s].as_cpu()) for o in outs]
ref_scale = scale or 1.0
ref_shift = shift or 0.0
ref_out = ref_scale * (image_like - mean) / std + ref_shift
Expand Down
4 changes: 2 additions & 2 deletions dali/test/python/operator_1/test_input_promotion.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def test_slice_fn():
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].at(0).as_cpu(), np.array([[14], [17]]))


def test_slice_ops():
Expand All @@ -62,7 +62,7 @@ def test_slice_ops():
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].at(0).as_cpu(), np.array([[14], [17]]))


def test_python_function():
Expand Down
6 changes: 1 addition & 5 deletions dali/test/python/operator_1/test_normal_distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
# limitations under the License.

from nvidia.dali.pipeline import Pipeline
from nvidia.dali.backend_impl import TensorListGPU
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
Expand Down Expand Up @@ -107,10 +106,7 @@ def shape_gen_f():
pipe.build()
for i in range(niter):
outputs = pipe.run()
out, shapes, means, stddevs = tuple(
outputs[i].as_cpu() if isinstance(outputs[i], TensorListGPU) else outputs[i]
for i in range(len(outputs))
)
out, shapes, means, stddevs = tuple(outputs[i].as_cpu() for i in range(len(outputs)))
for sample_idx in range(batch_size):
sample = np.array(out[sample_idx])
if sample.shape == ():
Expand Down
4 changes: 1 addition & 3 deletions dali/test/python/operator_1/test_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
# limitations under the License.

from nvidia.dali.pipeline import Pipeline
from nvidia.dali import backend
import nvidia.dali.ops as ops
import numpy as np
from test_utils import dali_type
Expand Down Expand Up @@ -395,8 +394,7 @@ def iter_setup(self):


def to_list(tensor_list):
if isinstance(tensor_list, backend.TensorListGPU):
tensor_list = tensor_list.as_cpu()
tensor_list = tensor_list.as_cpu()
out = []
for i in range(len(tensor_list)):
out.append(tensor_list.at(i))
Expand Down
10 changes: 4 additions & 6 deletions dali/test/python/operator_1/test_pad.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,11 @@
# limitations under the License.

import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.math as math
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali import Pipeline, pipeline_def
from nvidia.dali.backend_impl import TensorListGPU

from nose_utils import assert_raises
from test_utils import (
Expand Down Expand Up @@ -104,7 +102,7 @@ def check_pad(device, batch_size, input_max_shape, axes, axis_names, align, shap
for k in range(5):
out1, out2 = pipe.run()

out1_data = out1.as_cpu() if isinstance(out1[0], dali.backend_impl.TensorGPU) else out1
out1_data = out1.as_cpu()
max_shape = [-1] * len(input_max_shape)

for i in range(len(actual_axes)):
Expand All @@ -116,7 +114,7 @@ def check_pad(device, batch_size, input_max_shape, axes, axis_names, align, shap
if input_shape[dim] > max_shape[dim]:
max_shape[dim] = input_shape[dim]

out2_data = out2.as_cpu() if isinstance(out2[0], dali.backend_impl.TensorGPU) else out2
out2_data = out2.as_cpu()
for i in range(batch_size):
input_shape = out1_data.at(i).shape
output_shape = out2_data.at(i).shape
Expand Down Expand Up @@ -226,7 +224,7 @@ def check_pad_per_sample_shapes_and_alignment(device="cpu", batch_size=3, ndim=2
)
pipe.build()
for _ in range(num_iter):
outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
outs = [out.as_cpu() for out in pipe.run()]
for i in range(batch_size):
in_shape, in_data, req_shape, req_align, out_pad_shape, out_pad_align, out_pad_both = [
outs[out_idx].at(i) for out_idx in range(len(outs))
Expand Down Expand Up @@ -266,7 +264,7 @@ def check_pad_to_square(device="cpu", batch_size=3, ndim=2, num_iter=3):
pipe.set_outputs(in_data, out_data)
pipe.build()
for _ in range(num_iter):
outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
outs = [out.as_cpu() for out in pipe.run()]
for i in range(batch_size):
in_data, out_data = [outs[out_idx].at(i) for out_idx in range(len(outs))]
in_shape = in_data.shape
Expand Down
2 changes: 1 addition & 1 deletion dali/test/python/operator_2/test_python_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,7 @@ def test_python_operator_brightness():
(numpy_output,) = numpy_brightness.run()
(dali_output,) = dali_brightness.run()
for i in range(len(dali_output)):
assert numpy.allclose(numpy_output.at(i), dali_output.as_cpu().at(i), rtol=1e-5, atol=1)
assert numpy.allclose(numpy_output.at(i), dali_output.at(i).as_cpu(), rtol=1e-5, atol=1)


def invalid_function(image):
Expand Down
Loading

0 comments on commit 5e67bc7

Please sign in to comment.