Skip to content

Commit

Permalink
Add as_cpu to TensorCPU and TensorListCPU
Browse files Browse the repository at this point in the history
Signed-off-by: Joaquin Anton Guirao <[email protected]>
  • Loading branch information
jantonguirao committed Dec 16, 2024
1 parent f6e2c00 commit 3c5ca4e
Show file tree
Hide file tree
Showing 27 changed files with 66 additions and 110 deletions.
9 changes: 9 additions & 0 deletions dali/python/backend_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -718,6 +718,11 @@ void ExposeTensor(py::module &m) {
Returns a `TensorGPU` object being a copy of this `TensorCPU`.
)code",
py::return_value_policy::take_ownership)
.def("as_cpu", [](Tensor<CPUBackend> &t) -> Tensor<CPUBackend>& {
return t;
},
R"code(Bypass, since the object is already an instance of `TensorCPU`.)code",
py::return_value_policy::reference_internal)
.def("copy_to_external",
[](Tensor<CPUBackend> &t, py::object p) {
CopyToExternal<mm::memory_kind::host>(ctypes_void_ptr(p), t, AccessOrder::host(), false);
Expand Down Expand Up @@ -1174,6 +1179,10 @@ void ExposeTensorList(py::module &m) {
Returns a `TensorListGPU` object being a copy of this `TensorListCPU`.
)code",
py::return_value_policy::take_ownership)
.def("as_cpu", [](TensorList<CPUBackend> &t) -> TensorList<CPUBackend>& {
      return t;
    }, R"code(Bypass, as it is already an instance of `TensorListCPU`.)code",
    py::return_value_policy::reference_internal)
.def("layout", [](TensorList<CPUBackend> &t) {
return t.GetLayout().str();
})
Expand Down
3 changes: 1 addition & 2 deletions dali/test/python/auto_aug/test_augmentations.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,7 @@ def pipeline():
if dev == "gpu":
output = output.as_cpu()
output = [np.array(sample) for sample in output]
if isinstance(data, _tensors.TensorListGPU):
data = data.as_cpu()
data = data.as_cpu()
data = [np.array(sample) for sample in data]

if modality == "image":
Expand Down
3 changes: 1 addition & 2 deletions dali/test/python/auto_aug/test_rand_augment.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,7 @@ def debug_discrepancy_helper(*batch_pairs):
"""

def as_array_list(batch):
if isinstance(batch, tensors.TensorListGPU):
batch = batch.as_cpu()
batch = batch.as_cpu()
return [np.array(sample) for sample in batch]

batch_names = [name for _, _, name in batch_pairs]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,7 @@


def tensor_list_to_array(tensor_list):
if isinstance(tensor_list, dali.backend_impl.TensorListGPU):
tensor_list = tensor_list.as_cpu()
return tensor_list.as_array()
return tensor_list.as_cpu().as_array()


# Check whether a given pipeline is stateless
Expand Down
6 changes: 2 additions & 4 deletions dali/test/python/decoder/test_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,7 @@ def video_decoder_iter(batch_size, epochs=1, device="cpu", module=fn.experimenta
pipe.build()
for _ in range(int((epochs * len(files) + batch_size - 1) / batch_size)):
(output,) = pipe.run()
if isinstance(output, TensorListGPU):
output = output.as_cpu()
output = output.as_cpu()
for i in range(batch_size):
yield np.array(output[i])

Expand All @@ -89,8 +88,7 @@ def ref_iter(epochs=1, device="cpu"):
pipe = reference_pipeline(filename, device=device)
pipe.build()
(output,) = pipe.run()
if isinstance(output, TensorListGPU):
output = output.as_cpu()
output = output.as_cpu()
yield np.array(output[0])


Expand Down
10 changes: 2 additions & 8 deletions dali/test/python/operator_1/test_arithmetic_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,12 +226,6 @@ def default_range(*types):
]


def as_cpu(tl):
if isinstance(tl, TensorListGPU):
return tl.as_cpu()
return tl


def max_dtype(kind, left_dtype, right_dtype):
return np.dtype(kind + str(max(left_dtype.itemsize, right_dtype.itemsize)))

Expand Down Expand Up @@ -449,8 +443,8 @@ def get_numpy_input(input, kind, orig_type, target_type):


def extract_un_data(pipe_out, sample_id, kind, target_type):
input = as_cpu(pipe_out[0]).at(sample_id)
out = as_cpu(pipe_out[1]).at(sample_id)
input = pipe_out[0].as_cpu().at(sample_id)
out = pipe_out[1].as_cpu().at(sample_id)
assert_equals(out.dtype, target_type)
in_np = get_numpy_input(input, kind, input.dtype.type, target_type)
return in_np, out
Expand Down
8 changes: 3 additions & 5 deletions dali/test/python/operator_1/test_batch_permute.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,7 @@ def _test_permute_batch(device, type):
for i in range(10):
orig, permuted, idxs = pipe.run()
idxs = [int(idxs.at(i)) for i in range(batch_size)]
if isinstance(orig, dali.backend.TensorListGPU):
orig = orig.as_cpu()
orig = orig.as_cpu()
ref = [orig.at(idx) for idx in idxs]
check_batch(permuted, ref, len(ref), 0, 0, "abc")

Expand All @@ -91,10 +90,9 @@ def _test_permute_batch_fixed(device):
pipe.set_outputs(data, fn.permute_batch(data, indices=idxs))
pipe.build()

for i in range(10):
for _ in range(10):
orig, permuted = pipe.run()
if isinstance(orig, dali.backend.TensorListGPU):
orig = orig.as_cpu()
orig = orig.as_cpu()
ref = [orig.at(idx) for idx in idxs]
check_batch(permuted, ref, len(ref), 0, 0, "abc")

Expand Down
4 changes: 2 additions & 2 deletions dali/test/python/operator_1/test_coin_flip.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,10 @@ def shape_gen_f():
pipe.set_outputs(*outputs)
pipe.build()
outputs = pipe.run()
data_out = outputs[0].as_cpu() if isinstance(outputs[0], TensorListGPU) else outputs[0]
data_out = outputs[0].as_cpu()
shapes_out = None
if max_shape is not None:
shapes_out = outputs[1].as_cpu() if isinstance(outputs[1], TensorListGPU) else outputs[1]
shapes_out = outputs[1].as_cpu()
p = p if p is not None else 0.5
for i in range(batch_size):
data = np.array(data_out[i])
Expand Down
2 changes: 1 addition & 1 deletion dali/test/python/operator_1/test_coord_flip.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def check_operator_coord_flip(device, batch_size, layout, shape, center_x, cente
for sample in range(batch_size):
in_coords = outputs[0].at(sample)
if device == "gpu":
out_coords = outputs[1].as_cpu().at(sample)
out_coords = outputs[1].at(sample).as_cpu()
else:
out_coords = outputs[1].at(sample)
if in_coords.shape == () or in_coords.shape[0] == 0:
Expand Down
11 changes: 3 additions & 8 deletions dali/test/python/operator_1/test_crop.py
Original file line number Diff line number Diff line change
Expand Up @@ -580,15 +580,10 @@ def check_crop_with_out_of_bounds_policy_support(
if fill_values is None:
fill_values = 0
pipe.build()
for k in range(3):
for _ in range(3):
outs = pipe.run()
out = outs[0]
in_data = outs[1]
if isinstance(out, dali.backend_impl.TensorListGPU):
out = out.as_cpu()
if isinstance(in_data, dali.backend_impl.TensorListGPU):
in_data = in_data.as_cpu()

out = outs[0].as_cpu()
in_data = outs[1].as_cpu()
assert batch_size == len(out)
for idx in range(batch_size):
sample_in = in_data.at(idx)
Expand Down
12 changes: 3 additions & 9 deletions dali/test/python/operator_1/test_crop_mirror_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -773,10 +773,8 @@ def check_cmn_with_out_of_bounds_policy_support(
out = outs[0]
in_data = outs[1]
mirror_data = outs[2]
if isinstance(out, dali.backend_impl.TensorListGPU):
out = out.as_cpu()
if isinstance(in_data, dali.backend_impl.TensorListGPU):
in_data = in_data.as_cpu()
out = out.as_cpu()
in_data = in_data.as_cpu()

assert batch_size == len(out)
for idx in range(batch_size):
Expand Down Expand Up @@ -908,11 +906,7 @@ def pipe():
outs = p.run()
for s in range(batch_size):
out, image_like, mean, std = [
(
np.array(o[s].as_cpu())
if isinstance(o, dali.backend_impl.TensorListGPU)
else np.array(o[s])
)
(np.array(o[s].as_cpu()))
for o in outs
]
ref_scale = scale or 1.0
Expand Down
4 changes: 2 additions & 2 deletions dali/test/python/operator_1/test_input_promotion.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def test_slice_fn():
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].at(0).as_cpu(), np.array([[14], [17]]))


def test_slice_ops():
Expand All @@ -62,7 +62,7 @@ def test_slice_ops():
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14], [17]]))
assert np.array_equal(o[1].at(0).as_cpu(), np.array([[14], [17]]))


def test_python_function():
Expand Down
5 changes: 1 addition & 4 deletions dali/test/python/operator_1/test_normal_distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,7 @@ def shape_gen_f():
pipe.build()
for i in range(niter):
outputs = pipe.run()
out, shapes, means, stddevs = tuple(
outputs[i].as_cpu() if isinstance(outputs[i], TensorListGPU) else outputs[i]
for i in range(len(outputs))
)
out, shapes, means, stddevs = tuple(outputs[i].as_cpu() for i in range(len(outputs)))
for sample_idx in range(batch_size):
sample = np.array(out[sample_idx])
if sample.shape == ():
Expand Down
3 changes: 1 addition & 2 deletions dali/test/python/operator_1/test_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -395,8 +395,7 @@ def iter_setup(self):


def to_list(tensor_list):
if isinstance(tensor_list, backend.TensorListGPU):
tensor_list = tensor_list.as_cpu()
tensor_list = tensor_list.as_cpu()
out = []
for i in range(len(tensor_list)):
out.append(tensor_list.at(i))
Expand Down
8 changes: 4 additions & 4 deletions dali/test/python/operator_1/test_pad.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def check_pad(device, batch_size, input_max_shape, axes, axis_names, align, shap
for k in range(5):
out1, out2 = pipe.run()

out1_data = out1.as_cpu() if isinstance(out1[0], dali.backend_impl.TensorGPU) else out1
out1_data = out1.as_cpu()
max_shape = [-1] * len(input_max_shape)

for i in range(len(actual_axes)):
Expand All @@ -116,7 +116,7 @@ def check_pad(device, batch_size, input_max_shape, axes, axis_names, align, shap
if input_shape[dim] > max_shape[dim]:
max_shape[dim] = input_shape[dim]

out2_data = out2.as_cpu() if isinstance(out2[0], dali.backend_impl.TensorGPU) else out2
out2_data = out2.as_cpu()
for i in range(batch_size):
input_shape = out1_data.at(i).shape
output_shape = out2_data.at(i).shape
Expand Down Expand Up @@ -226,7 +226,7 @@ def check_pad_per_sample_shapes_and_alignment(device="cpu", batch_size=3, ndim=2
)
pipe.build()
for _ in range(num_iter):
outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
outs = [out.as_cpu() for out in pipe.run()]
for i in range(batch_size):
in_shape, in_data, req_shape, req_align, out_pad_shape, out_pad_align, out_pad_both = [
outs[out_idx].at(i) for out_idx in range(len(outs))
Expand Down Expand Up @@ -266,7 +266,7 @@ def check_pad_to_square(device="cpu", batch_size=3, ndim=2, num_iter=3):
pipe.set_outputs(in_data, out_data)
pipe.build()
for _ in range(num_iter):
outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
outs = [out.as_cpu() for out in pipe.run()]
for i in range(batch_size):
in_data, out_data = [outs[out_idx].at(i) for out_idx in range(len(outs))]
in_shape = in_data.shape
Expand Down
2 changes: 1 addition & 1 deletion dali/test/python/operator_2/test_python_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,7 @@ def test_python_operator_brightness():
(numpy_output,) = numpy_brightness.run()
(dali_output,) = dali_brightness.run()
for i in range(len(dali_output)):
assert numpy.allclose(numpy_output.at(i), dali_output.as_cpu().at(i), rtol=1e-5, atol=1)
assert numpy.allclose(numpy_output.at(i), dali_output.at(i).as_cpu(), rtol=1e-5, atol=1)


def invalid_function(image):
Expand Down
12 changes: 2 additions & 10 deletions dali/test/python/operator_2/test_remap.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,16 +176,8 @@ def _compare_pipelines_pixelwise(self, pipe1, pipe2, N_iterations, eps=0.01):
f"Numbers of outputs in the pipelines does not match: {len(out1)} vs {len(out2)}.",
)
for i in range(len(out1)):
out1_data = (
out1[i].as_cpu()
if isinstance(out1[i][0], dali.backend_impl.TensorGPU)
else out1[i]
)
out2_data = (
out2[i].as_cpu()
if isinstance(out2[i][0], dali.backend_impl.TensorGPU)
else out2[i]
)
out1_data = out1[i].as_cpu()
out2_data = out2[i].as_cpu()
for sample1, sample2 in zip(out1_data, out2_data):
s1 = np.array(sample1)
s2 = np.array(sample2)
Expand Down
10 changes: 3 additions & 7 deletions dali/test/python/operator_2/test_resize.py
Original file line number Diff line number Diff line change
Expand Up @@ -571,18 +571,14 @@ def get_output():
print("Requested output", size[i])
assert max_err <= eps

ref_in = dali_in
if isinstance(ref_in, dali.tensors.TensorListGPU):
ref_in = ref_in.as_cpu() # suppress warnings
ref_in = dali_in.as_cpu()
pil_pipe.feed_input("images", ref_in, layout=layout_str(dim, channel_first))
pil_pipe.feed_input("size", dali_out_size)
pil_pipe.feed_input("roi_start", roi_start)
pil_pipe.feed_input("roi_end", roi_end)
ref = pil_pipe.run()

dali_resized = o[1]
if isinstance(dali_resized, dali.tensors.TensorListGPU):
dali_resized = dali_resized.as_cpu()
dali_resized = o[1].as_cpu()
ref_resized = ref[0]

max_avg_err = 0.6 if dim == 3 else 0.4
Expand Down Expand Up @@ -874,7 +870,7 @@ def resize_pipe():
pipe = resize_pipe()
pipe.build()
(outs,) = pipe.run()
out = outs.as_cpu().at(0)
out = outs.at(0).as_cpu()
global large_data_resized
if large_data_resized is None:
large_data_resized = make_cube(350, 224, 224)
Expand Down
2 changes: 1 addition & 1 deletion dali/test/python/operator_2/test_resize_seq.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def init_video_data():

video_pipe.build()
out = video_pipe.run()
in_seq = out[0].as_cpu().at(0)
in_seq = out[0].at(0).as_cpu()
return in_seq


Expand Down
16 changes: 8 additions & 8 deletions dali/test/python/operator_2/test_subscript.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def test_plain_indexing():
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[1, 1], cpu.at(i))
assert np.array_equal(x[1, 1], gpu.as_cpu().at(i))
assert np.array_equal(x[1, 1], gpu.at(i).as_cpu())


def _test_indexing(data_gen, input_layout, output_layout, dali_index_func, ref_index_func=None):
Expand All @@ -50,7 +50,7 @@ def _test_indexing(data_gen, input_layout, output_layout, dali_index_func, ref_i
x = inp.at(i)
ref = (ref_index_func or dali_index_func)(x)
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
assert np.array_equal(ref, gpu.at(i).as_cpu())
assert cpu.layout() == output_layout
assert gpu.layout() == output_layout

Expand Down Expand Up @@ -94,7 +94,7 @@ def test_swapped_ends():
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[2:1], cpu.at(i))
assert np.array_equal(x[2:1], gpu.as_cpu().at(i))
assert np.array_equal(x[2:1], gpu.at(i).as_cpu())


def test_noop():
Expand Down Expand Up @@ -129,7 +129,7 @@ def data_gen():
j = (j + 1) % len(lo_idxs)
k = (k + 1) % len(hi_idxs)
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
assert np.array_equal(ref, gpu.at(i).as_cpu())


def test_runtime_stride_dim1():
Expand All @@ -154,7 +154,7 @@ def data_gen():
ref = x[::strides[j]]
# fmt: on
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
assert np.array_equal(ref, gpu.at(i).as_cpu())
j = (j + 1) % len(strides)


Expand All @@ -180,7 +180,7 @@ def data_gen():
ref = x[:, ::strides[j]]
# fmt: on
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
assert np.array_equal(ref, gpu.at(i).as_cpu())
j = (j + 1) % len(strides)


Expand Down Expand Up @@ -304,7 +304,7 @@ def test_multiple_skipped_dims():
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[1, :, :, 1], cpu.at(i))
assert np.array_equal(x[1, :, :, 1], gpu.as_cpu().at(i))
assert np.array_equal(x[1, :, :, 1], gpu.at(i).as_cpu())


def test_empty_slice():
Expand All @@ -316,4 +316,4 @@ def test_empty_slice():
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[0:0, 0:1], cpu.at(i))
assert np.array_equal(x[0:0, 0:1], gpu.as_cpu().at(i))
assert np.array_equal(x[0:0, 0:1], gpu.at(i).as_cpu())
Loading

0 comments on commit 3c5ca4e

Please sign in to comment.