Skip to content

Commit

Permalink
Remove the sym_ symbols.
Browse files Browse the repository at this point in the history
  • Loading branch information
LaurentMazare committed Oct 1, 2023
1 parent b3498a7 commit 1fb6257
Show file tree
Hide file tree
Showing 9 changed files with 4 additions and 157 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ serde_json = { version = "1.0.96", optional = true }
memmap2 = { version = "0.6.1", optional = true }

[dev-dependencies]
anyhow = "1"
anyhow = "^1.0.60"

[workspace]
members = [
Expand Down
1 change: 1 addition & 0 deletions gen/gen.ml
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ let excluded_prefixes =
; "_amp_foreach"
; "_nested_tensor"
; "_fused_adam"
; "sym_"
]

(* Generated-binding names ending in one of these suffixes are skipped. *)
let excluded_suffixes = [ "_forward"; "_forward_out" ]
Expand Down
58 changes: 0 additions & 58 deletions src/wrappers/tensor_fallible_generated.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34195,64 +34195,6 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

/// Fallible wrapper around `atg_sym_constrain_range`: constrains the symbolic
/// size value `size` to the optional inclusive range `[min, max]`.
///
/// Returns `Err(TchError)` if the underlying C call reports an error.
pub fn f_sym_constrain_range<S: Into<Scalar>>(
    size: S,
    min: impl Into<Option<i64>>,
    max: impl Into<Option<i64>>,
) -> Result<(), TchError> {
    // The C ABI has no Option type: each bound crosses the FFI boundary as a
    // (value, is_null) pair, with 0 standing in when the bound is absent.
    let min_bound: Option<i64> = min.into();
    let max_bound: Option<i64> = max.into();
    let (min_v, min_null) = (min_bound.unwrap_or(0i64), min_bound.is_none() as i8);
    let (max_v, max_null) = (max_bound.unwrap_or(0i64), max_bound.is_none() as i8);
    unsafe_torch_err!(atg_sym_constrain_range(
        size.into().c_scalar,
        min_v,
        min_null,
        max_v,
        max_null
    ));
    Ok(())
}

/// Fallible wrapper around `atg_sym_constrain_range_for_size`: constrains the
/// symbolic size value `size` to the optional inclusive range `[min, max]`.
///
/// Returns `Err(TchError)` if the underlying C call reports an error.
pub fn f_sym_constrain_range_for_size<S: Into<Scalar>>(
    size: S,
    min: impl Into<Option<i64>>,
    max: impl Into<Option<i64>>,
) -> Result<(), TchError> {
    // Optional bounds are marshalled as (value, is_null) pairs for the C ABI;
    // 0 is a placeholder value when the corresponding bound is None.
    let min_bound: Option<i64> = min.into();
    let max_bound: Option<i64> = max.into();
    let (min_v, min_null) = (min_bound.unwrap_or(0i64), min_bound.is_none() as i8);
    let (max_v, max_null) = (max_bound.unwrap_or(0i64), max_bound.is_none() as i8);
    unsafe_torch_err!(atg_sym_constrain_range_for_size(
        size.into().c_scalar,
        min_v,
        min_null,
        max_v,
        max_null
    ));
    Ok(())
}

/// Fallible wrapper around `atg_sym_numel`: the tensor's element count
/// obtained through the symbolic-shape path.
pub fn f_sym_numel(&self) -> Result<i64, TchError> {
    // The macro assigns into the binding before any error is surfaced.
    let numel: i64;
    unsafe_torch_err!(numel = atg_sym_numel(self.c_tensor));
    Ok(numel)
}

/// Fallible wrapper around `atg_sym_size`: the size of dimension `dim`
/// obtained through the symbolic-shape path.
pub fn f_sym_size(&self, dim: i64) -> Result<i64, TchError> {
    let dim_size: i64;
    unsafe_torch_err!(dim_size = atg_sym_size(self.c_tensor, dim));
    Ok(dim_size)
}

/// Fallible wrapper around `atg_sym_storage_offset`: the tensor's storage
/// offset obtained through the symbolic-shape path.
pub fn f_sym_storage_offset(&self) -> Result<i64, TchError> {
    let offset: i64;
    unsafe_torch_err!(offset = atg_sym_storage_offset(self.c_tensor));
    Ok(offset)
}

/// Fallible wrapper around `atg_sym_stride`: the stride of dimension `dim`
/// obtained through the symbolic-shape path.
pub fn f_sym_stride(&self, dim: i64) -> Result<i64, TchError> {
    let stride: i64;
    unsafe_torch_err!(stride = atg_sym_stride(self.c_tensor, dim));
    Ok(stride)
}

pub fn f_tr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t(c_tensors.as_mut_ptr(), self.c_tensor));
Expand Down
32 changes: 0 additions & 32 deletions src/wrappers/tensor_generated.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17233,38 +17233,6 @@ impl Tensor {
self.f_swapdims_(dim0, dim1).unwrap()
}

/// Constrains the symbolic size value `size` to the optional range
/// `[min, max]`. Panics on error; see `f_sym_constrain_range` for the
/// fallible variant.
pub fn sym_constrain_range<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
) {
Tensor::f_sym_constrain_range(size, min, max).unwrap()
}

/// Constrains the symbolic size value `size` to the optional range
/// `[min, max]`. Panics on error; see `f_sym_constrain_range_for_size`
/// for the fallible variant.
pub fn sym_constrain_range_for_size<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
) {
Tensor::f_sym_constrain_range_for_size(size, min, max).unwrap()
}

/// Element count via the symbolic-shape path. Panics on error; see
/// `f_sym_numel` for the fallible variant.
pub fn sym_numel(&self) -> i64 {
self.f_sym_numel().unwrap()
}

/// Size of dimension `dim` via the symbolic-shape path. Panics on error;
/// see `f_sym_size` for the fallible variant.
pub fn sym_size(&self, dim: i64) -> i64 {
self.f_sym_size(dim).unwrap()
}

/// Storage offset via the symbolic-shape path. Panics on error; see
/// `f_sym_storage_offset` for the fallible variant.
pub fn sym_storage_offset(&self) -> i64 {
self.f_sym_storage_offset().unwrap()
}

/// Stride of dimension `dim` via the symbolic-shape path. Panics on error;
/// see `f_sym_stride` for the fallible variant.
pub fn sym_stride(&self, dim: i64) -> i64 {
self.f_sym_stride(dim).unwrap()
}

/// Panicking wrapper around `f_tr`; returns the resulting tensor or panics
/// on error.
pub fn tr(&self) -> Tensor {
self.f_tr().unwrap()
}
Expand Down
2 changes: 1 addition & 1 deletion torch-sys/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ license = "MIT/Apache-2.0"
libc = "0.2.0"

[build-dependencies]
anyhow = "1.0"
anyhow = "^1.0.60"
cc = "1.0.61"
ureq = { version = "2.6", optional = true, features = ["json"] }
serde_json = { version = "1.0", optional = true }
Expand Down
2 changes: 1 addition & 1 deletion torch-sys/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -389,7 +389,7 @@ impl SystemInfo {
.warnings(false)
.includes(&self.libtorch_include_dirs)
.flag(&format!("-Wl,-rpath={}", self.libtorch_lib_dir.display()))
.flag("-std=c++14")
.flag("-std=c++17")
.flag(&format!("-D_GLIBCXX_USE_CXX11_ABI={}", self.cxx11_abi))
.files(&c_files)
.compile("tch");
Expand Down
40 changes: 0 additions & 40 deletions torch-sys/libtch/torch_api_generated.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16907,46 +16907,6 @@ void atg_swapdims_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
)
}

// C entry point for torch::sym_constrain_range. Each optional bound arrives
// as a (value, is_null) pair; PROTECT captures C++ exceptions for the caller.
void atg_sym_constrain_range(scalar size, int64_t min_v, uint8_t min_null, int64_t max_v, uint8_t max_null) {
  PROTECT(
    auto min_opt = min_null ? c10::nullopt : c10::optional<int64_t>(min_v);
    auto max_opt = max_null ? c10::nullopt : c10::optional<int64_t>(max_v);
    torch::sym_constrain_range(*size, min_opt, max_opt);
  )
}

// C entry point for torch::sym_constrain_range_for_size. Each optional bound
// arrives as a (value, is_null) pair; PROTECT captures C++ exceptions.
void atg_sym_constrain_range_for_size(scalar size, int64_t min_v, uint8_t min_null, int64_t max_v, uint8_t max_null) {
  PROTECT(
    auto min_opt = min_null ? c10::nullopt : c10::optional<int64_t>(min_v);
    auto max_opt = max_null ? c10::nullopt : c10::optional<int64_t>(max_v);
    torch::sym_constrain_range_for_size(*size, min_opt, max_opt);
  )
}

// Element count of `self` via torch::sym_numel. PROTECT captures C++
// exceptions; 0 is the fallback returned after a caught exception.
int64_t atg_sym_numel(tensor self) {
PROTECT(
return torch::sym_numel(*self);
)
return 0;
}

// Size of dimension `dim` via torch::sym_size. PROTECT captures C++
// exceptions; 0 is the fallback returned after a caught exception.
int64_t atg_sym_size(tensor self, int64_t dim) {
PROTECT(
return torch::sym_size(*self, dim);
)
return 0;
}

// Storage offset of `self` via torch::sym_storage_offset. PROTECT captures
// C++ exceptions; 0 is the fallback returned after a caught exception.
int64_t atg_sym_storage_offset(tensor self) {
PROTECT(
return torch::sym_storage_offset(*self);
)
return 0;
}

// Stride of dimension `dim` via torch::sym_stride. PROTECT captures C++
// exceptions; 0 is the fallback returned after a caught exception.
int64_t atg_sym_stride(tensor self, int64_t dim) {
PROTECT(
return torch::sym_stride(*self, dim);
)
return 0;
}

void atg_t(tensor *out__, tensor self) {
PROTECT(
auto outputs__ = torch::t(*self);
Expand Down
6 changes: 0 additions & 6 deletions torch-sys/libtch/torch_api_generated.h
Original file line number Diff line number Diff line change
Expand Up @@ -2352,12 +2352,6 @@ void atg_swapaxes(tensor *, tensor self, int64_t axis0, int64_t axis1);
void atg_swapaxes_(tensor *, tensor self, int64_t axis0, int64_t axis1);
void atg_swapdims(tensor *, tensor self, int64_t dim0, int64_t dim1);
void atg_swapdims_(tensor *, tensor self, int64_t dim0, int64_t dim1);
// Symbolic-shape helpers. Optional int64 bounds are passed as
// (value, is_null) pairs since the C ABI has no optional type.
void atg_sym_constrain_range(scalar size, int64_t min_v, uint8_t min_null, int64_t max_v, uint8_t max_null);
void atg_sym_constrain_range_for_size(scalar size, int64_t min_v, uint8_t min_null, int64_t max_v, uint8_t max_null);
int64_t atg_sym_numel(tensor self);
int64_t atg_sym_size(tensor self, int64_t dim);
int64_t atg_sym_storage_offset(tensor self);
int64_t atg_sym_stride(tensor self, int64_t dim);
void atg_t(tensor *, tensor self);
void atg_t_(tensor *, tensor self);
void atg_t_copy(tensor *, tensor self);
Expand Down
18 changes: 0 additions & 18 deletions torch-sys/src/c_generated.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14065,24 +14065,6 @@ extern "C" {
pub fn atg_swapaxes_(out__: *mut *mut C_tensor, self_: *mut C_tensor, axis0_: i64, axis1_: i64);
pub fn atg_swapdims(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
pub fn atg_swapdims_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
// FFI declarations for the symbolic-shape helpers. Optional i64 bounds are
// passed as (value, is_null) pairs since the C ABI has no Option type.
pub fn atg_sym_constrain_range(
size_: *mut C_scalar,
min_v: i64,
min_null: i8,
max_v: i64,
max_null: i8,
);
pub fn atg_sym_constrain_range_for_size(
size_: *mut C_scalar,
min_v: i64,
min_null: i8,
max_v: i64,
max_null: i8,
);
pub fn atg_sym_numel(self_: *mut C_tensor) -> i64;
pub fn atg_sym_size(self_: *mut C_tensor, dim_: i64) -> i64;
pub fn atg_sym_storage_offset(self_: *mut C_tensor) -> i64;
pub fn atg_sym_stride(self_: *mut C_tensor, dim_: i64) -> i64;
pub fn atg_t(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_t_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_t_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
Expand Down

0 comments on commit 1fb6257

Please sign in to comment.