diff --git a/autograd/context.v b/autograd/context.v index e6583cdd..68907c64 100644 --- a/autograd/context.v +++ b/autograd/context.v @@ -6,7 +6,7 @@ import vtl // a number of operations. Variables that interact with each // other must belong to the same context, or state will be // lost while tracking operations done. -[heap] +@[heap] pub struct Context[T] { pub mut: // A list of all variables present in an operation. @@ -45,7 +45,7 @@ pub fn (mut ctx Context[T]) pop[T]() !&Node[T] { return ctx.nodes.pop() } -[params] +@[params] pub struct ContextVariableData { requires_grad bool = true } diff --git a/autograd/node.v b/autograd/node.v index 35979770..6ed0be88 100644 --- a/autograd/node.v +++ b/autograd/node.v @@ -3,7 +3,7 @@ module autograd // Node is a member of a computational graph that contains // a reference to a gate, as well as the parents of the operation // and the payload that resulted from the operation. -[heap] +@[heap] pub struct Node[T] { pub: // A Gate containing a backwards and cache function for diff --git a/autograd/payload.v b/autograd/payload.v index a7e6723c..903c2d3d 100644 --- a/autograd/payload.v +++ b/autograd/payload.v @@ -3,7 +3,7 @@ module autograd // Payload is a simple wrapper around a Variable. It // is only abstracted out to be a bit more explicit that // it is being passed around through an operation -[heap] +@[heap] pub struct Payload[T] { pub: // Contents of the payload diff --git a/autograd/variable.v b/autograd/variable.v index 02e5ab1f..43350790 100644 --- a/autograd/variable.v +++ b/autograd/variable.v @@ -9,7 +9,7 @@ import vtl // This is the fundamental object used in automatic // differentiation, as well as the neural network aspects // of VTL -[heap] +@[heap] pub struct Variable[T] { pub mut: // The value of the Variable. This should not be edited outside @@ -30,7 +30,7 @@ pub mut: requires_grad bool } -[params] +@[params] pub struct VariableData { requires_grad bool = true } diff --git a/datasets/loader.v b/datasets/loader.v index a0921bfc..7faaa3e1 100644 --- a/datasets/loader.v +++ b/datasets/loader.v @@ -13,7 +13,7 @@ fn get_cache_dir(subdir ...string) string { return os.join_path(cache_dir, ...subdir) } -[params] +@[params] struct RawDownload { url string target string @@ -39,7 +39,7 @@ fn load_from_url(data RawDownload) ! { http.download_file(data.url, cache_file_path)! } -[params] +@[params] struct DatasetDownload { dataset string baseurl string diff --git a/ml/metrics/common_error_functions.v b/ml/metrics/common_error_functions.v index 647de401..a73b4e34 100644 --- a/ml/metrics/common_error_functions.v +++ b/ml/metrics/common_error_functions.v @@ -4,18 +4,18 @@ import math import vtl import vtl.stats -[inline] +@[inline] pub fn squared_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] { diff := y.subtract(y_true)! return diff.multiply(diff) } -[inline] +@[inline] pub fn mean_squared_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T { return stats.mean[T](squared_error[T](y, y_true)!) } -[inline] +@[inline] pub fn relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] { return y.nmap([y_true], fn [T](vals []T, i []int) T { denom := math.max(math.abs(vals[1]), math.abs(vals[0])) @@ -26,17 +26,17 @@ pub fn relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T }) } -[inline] +@[inline] pub fn mean_relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T { return stats.mean[T](relative_error[T](y, y_true)!)
} -[inline] +@[inline] pub fn absolute_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] { return y_true.subtract(y)!.abs() } -[inline] +@[inline] pub fn mean_absolute_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T { return stats.mean[T](absolute_error[T](y, y_true)!) } diff --git a/nn/internal/activation.v b/nn/internal/activation.v index fccc0978..ab8353cf 100644 --- a/nn/internal/activation.v +++ b/nn/internal/activation.v @@ -4,13 +4,13 @@ import math import vtl // tanh squashes a real-valued number to the range [-1, 1] -[inline] +@[inline] pub fn tanh[T](x &vtl.Tensor[T]) &vtl.Tensor[T] { return vtl.tanh(x) } // deriv_tanh computes the derivative of tanh -[inline] +@[inline] pub fn deriv_tanh[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] { return gradient.nmap([cached], fn [T](vals []T, i []int) T { return vals[0] * (vtl.cast[T](1) - vals[1] * vals[1]) @@ -18,7 +18,7 @@ pub fn deriv_tanh[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tenso } // sigmoid takes a real-valued number and squashes it to the range [0, 1] -[inline] +@[inline] pub fn sigmoid[T](x &vtl.Tensor[T]) &vtl.Tensor[T] { return x.map(fn [T](val T, i []int) T { return vtl.cast[T](1) / (vtl.cast[T](1) + vtl.cast[T](math.exp(-vtl.cast[f64](val)))) @@ -26,7 +26,7 @@ pub fn sigmoid[T](x &vtl.Tensor[T]) &vtl.Tensor[T] { } // deriv_sigmoid computes the derivative of sigmoid -[inline] +@[inline] pub fn deriv_sigmoid[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] { return gradient.nmap([cached], fn [T](vals []T, i []int) T { return vals[0] * vals[1] * (vtl.cast[T](1) - vals[1]) @@ -34,7 +34,7 @@ pub fn deriv_sigmoid[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Te } // relu activation function -[inline] +@[inline] pub fn relu[T](x &vtl.Tensor[T]) &vtl.Tensor[T] { return x.map(fn [T](val T, i []int) T { if val < 0 { @@ -45,7 +45,7 @@ pub fn relu[T](x &vtl.Tensor[T]) &vtl.Tensor[T] { } // deriv_relu computes the derivative of relu -[inline] +@[inline] pub fn deriv_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] { return gradient.nmap([cached], fn [T](vals []T, i []int) T { if vals[1] < 0 { @@ -56,7 +56,7 @@ pub fn deriv_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tenso } // leaky_relu activation function -[inline] +@[inline] pub fn leaky_relu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] { return x.map(fn [alpha] [T](val T, i []int) T { if val < 0 { @@ -67,7 +67,7 @@ pub fn leaky_relu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] { } // deriv_leaky_relu computes the derivative of leaky_relu -[inline] +@[inline] pub fn deriv_leaky_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T] { return gradient.nmap([cached], fn [alpha] [T](vals []T, i []int) T { if vals[1] < 0 { @@ -78,7 +78,7 @@ pub fn deriv_leaky_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha } // elu activation function -[inline] +@[inline] pub fn elu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] { return x.map(fn [alpha] [T](val T, i []int) T { if val < 0 { @@ -89,7 +89,7 @@ pub fn elu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] { } // deriv_elu computes the derivative of elu -[inline] +@[inline] pub fn deriv_elu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T] { return gradient.nmap([cached], fn [alpha] [T](vals []T, i []int) T { if vals[1] < 0 { @@ -101,7 +101,7 @@ pub fn deriv_elu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&v // sigmoid_cross_entropy computes the sigmoid
cross entropy between // the labels and the predictions -[inline] +@[inline] pub fn sigmoid_cross_entropy[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T] { sum := input.nreduce([target], vtl.cast[T](0), fn [T](acc T, vals []T, i []int) T { next := -(vals[1] * vtl.cast[T](math.max(f64(0), f64(vals[0])))) - vtl.cast[T](math.log( @@ -114,7 +114,7 @@ pub fn sigmoid_cross_entropy[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&v } // mse squared error between the labels and the predictions -[inline] +@[inline] pub fn mse[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T] { sum := input.nreduce([target], vtl.cast[T](0), fn [T](acc T, vals []T, i []int) T { next := vtl.cast[T](math.pow(f64(vals[0] - vals[1]), 2.0)) diff --git a/nn/layers/dropout.v b/nn/layers/dropout.v index 2ff7de22..11fb9246 100644 --- a/nn/layers/dropout.v +++ b/nn/layers/dropout.v @@ -6,7 +6,7 @@ import vtl.nn.internal import vtl.nn.gates.layers import vtl.nn.types -[params] +@[params] pub struct DropoutLayerConfig { prob f64 = 0.5 } diff --git a/nn/layers/elu.v b/nn/layers/elu.v index 8fc0c923..309d9cba 100644 --- a/nn/layers/elu.v +++ b/nn/layers/elu.v @@ -6,7 +6,7 @@ import vtl.nn.internal import vtl.nn.gates.activation import vtl.nn.types -[params] +@[params] pub struct EluLayerConfig { alpha f64 = 0.01 } diff --git a/nn/optimizers/adam.v b/nn/optimizers/adam.v index 4aeb199f..49f0f5fa 100644 --- a/nn/optimizers/adam.v +++ b/nn/optimizers/adam.v @@ -18,7 +18,7 @@ pub mut: second_moments []&vtl.Tensor[T] } -[params] +@[params] pub struct AdamOptimizerConfig { learning_rate f64 = 0.001 beta1 f64 = 0.9 diff --git a/nn/optimizers/sgd.v b/nn/optimizers/sgd.v index 640f747c..9259b683 100644 --- a/nn/optimizers/sgd.v +++ b/nn/optimizers/sgd.v @@ -11,7 +11,7 @@ pub mut: params []&autograd.Variable[T] } -[params] +@[params] pub struct SgdOptimizerConfig { learning_rate f64 = 0.001 } diff --git a/nn/types/optimizer.v b/nn/types/optimizer.v index 547de1b9..42b81678 100644 --- a/nn/types/optimizer.v +++ b/nn/types/optimizer.v @@ -5,7 +5,7 @@ import vtl.autograd // Optimizer is a generic interface for all optimizers. pub interface Optimizer[T] { mut: - params []&autograd.Variable[T] + params []&autograd.Variable[T] learning_rate f64 update() ! build_params(layers Layer[T]) diff --git a/src/assignment.v b/src/assignment.v index 4c4ff58b..073f5416 100644 --- a/src/assignment.v +++ b/src/assignment.v @@ -1,21 +1,21 @@ module vtl // set copies a scalar value into a Tensor at the provided index -[inline] +@[inline] pub fn (mut t Tensor[T]) set[T](index []int, val T) { offset := t.offset_index(index) t.data.set[T](offset, val) } // set_nth copies a scalar value into a Tensor at the provided offset -[inline] +@[inline] pub fn (mut t Tensor[T]) set_nth[T](n int, val T) { index := t.nth_index(n) t.set[T](index, val) } // fill fills an entire Tensor with a given value -[inline] +@[inline] pub fn (mut t Tensor[T]) fill[T](val T) &Tensor[T] { t.data.fill[T](val) return t diff --git a/src/broadcast.v b/src/broadcast.v index 8f524fbf..6e1dd895 100644 --- a/src/broadcast.v +++ b/src/broadcast.v @@ -120,7 +120,7 @@ fn broadcast_shapes(args ...[]int) []int { } // broadcast2 broadcasts two Tensors against each other -[inline] +@[inline] pub fn broadcast2[T](a &Tensor[T], b &Tensor[T]) !(&Tensor[T], &Tensor[T]) { shape := a.broadcastable(b)! r1 := a.broadcast_to(shape)! 
@@ -129,7 +129,7 @@ pub fn broadcast2[T](a &Tensor[T], b &Tensor[T]) !(&Tensor[T], &Tensor[T]) { } // broadcast3 broadcasts three Tensors against each other -[inline] +@[inline] pub fn broadcast3[T](a &Tensor[T], b &Tensor[T], c &Tensor[T]) !(&Tensor[T], &Tensor[T], &Tensor[T]) { shape := broadcast_shapes(a.shape, b.shape, c.shape) r1 := a.broadcast_to(shape)! @@ -139,7 +139,7 @@ pub fn broadcast3[T](a &Tensor[T], b &Tensor[T], c &Tensor[T]) !(&Tensor[T], &Te } // broadcast_n broadcasts N Tensors against each other -[inline] +@[inline] pub fn broadcast_n[T](ts []&Tensor[T]) ![]&Tensor[T] { shapes := ts.map(it.shape) shape := broadcast_shapes(...shapes) diff --git a/src/build.v b/src/build.v index 2c939b73..4455df28 100644 --- a/src/build.v +++ b/src/build.v @@ -2,7 +2,7 @@ module vtl import vtl.storage -[params] +@[params] pub struct TensorData { pub: memory MemoryFormat = .row_major diff --git a/src/cast.v b/src/cast.v index 31b21108..775271d1 100644 --- a/src/cast.v +++ b/src/cast.v @@ -3,7 +3,7 @@ module vtl // as_bool casts the Tensor to a Tensor of bools. // If the original Tensor is not a Tensor of bools, then each value is cast to a bool, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_bool[T]() &Tensor[bool] { $if T is bool { return t @@ -23,7 +23,7 @@ pub fn (t &Tensor[T]) as_bool[T]() &Tensor[bool] { // as_f32 casts the Tensor to a Tensor of f32s. // If the original Tensor is not a Tensor of f32s, then each value is cast to a f32, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_f32[T]() &Tensor[f32] { $if T is f32 { return t @@ -43,7 +43,7 @@ pub fn (t &Tensor[T]) as_f32[T]() &Tensor[f32] { // as_f64 casts the Tensor to a Tensor of f64s. // If the original Tensor is not a Tensor of f64s, then each value is cast to a f64, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_f64[T]() &Tensor[f64] { $if T is f64 { return t @@ -63,7 +63,7 @@ pub fn (t &Tensor[T]) as_f64[T]() &Tensor[f64] { // as_i16 casts the Tensor to a Tensor of i16 values. // If the original Tensor is not a Tensor of i16s, then each value is cast to a i16, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_i16[T]() &Tensor[i16] { $if T is i16 { return t @@ -83,7 +83,7 @@ pub fn (t &Tensor[T]) as_i16[T]() &Tensor[i16] { // as_i8 casts the Tensor to a Tensor of i8 values. // If the original Tensor is not a Tensor of i8s, then each value is cast to a i8, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_i8[T]() &Tensor[i8] { $if T is i8 { return t @@ -103,7 +103,7 @@ pub fn (t &Tensor[T]) as_i8[T]() &Tensor[i8] { // as_int casts the Tensor to a Tensor of ints. // If the original Tensor is not a Tensor of ints, then each value is cast to a int, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_int[T]() &Tensor[int] { $if T is int { return t @@ -123,7 +123,7 @@ pub fn (t &Tensor[T]) as_int[T]() &Tensor[int] { // as_string casts the Tensor to a Tensor of string values. // If the original Tensor is not a Tensor of strings, then each value is cast to a string, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_string[T]() &Tensor[string] { $if T is string { return t @@ -143,7 +143,7 @@ pub fn (t &Tensor[T]) as_string[T]() &Tensor[string] { // as_u8 casts the Tensor to a Tensor of u8 values. 
// If the original Tensor is not a Tensor of u8s, then each value is cast to a u8, // otherwise the original Tensor is returned. -[inline] +@[inline] pub fn (t &Tensor[T]) as_u8[T]() &Tensor[u8] { $if T is u8 { return t diff --git a/src/creation.v b/src/creation.v index e174bcf0..c0033051 100644 --- a/src/creation.v +++ b/src/creation.v @@ -1,19 +1,19 @@ module vtl // empty returns a new Tensor of given shape and type, without initializing entries -[inline] +@[inline] pub fn empty[T](shape []int, params TensorData) &Tensor[T] { return tensor[T](cast[T](0), shape, params) } // empty_like returns a new Tensor of given shape and type as a given Tensor -[inline] +@[inline] pub fn empty_like[T](t &Tensor[T]) &Tensor[T] { return tensor_like[T](t) } // identity returns an array is a square array with ones on the main diagonal -[inline] +@[inline] pub fn identity[T](n int, params TensorData) &Tensor[T] { return eye[T](n, n, 0, params) } @@ -32,31 +32,31 @@ pub fn eye[T](m int, n int, k int, params TensorData) &Tensor[T] { } // zeros returns a new tensor of a given shape and type, filled with zeros -[inline] +@[inline] pub fn zeros[T](shape []int, params TensorData) &Tensor[T] { return tensor[T](cast[T](0), shape, params) } // zeros_like returns a new Tensor of given shape and type as a given Tensor, filled with zeros -[inline] +@[inline] pub fn zeros_like[T](t &Tensor[T]) &Tensor[T] { return tensor_like[T](t) } // ones returns a new tensor of a given shape and type, filled with ones -[inline] +@[inline] pub fn ones[T](shape []int, params TensorData) &Tensor[T] { return full[T](shape, cast[T](1), params) } // ones_like returns a new tensor of a given shape and type, filled with ones -[inline] +@[inline] pub fn ones_like[T](t &Tensor[T]) &Tensor[T] { return full_like[T](t, cast[T](1)) } // full returns a new tensor of a given shape and type, filled with the given value -[inline] +@[inline] pub fn full[T](shape []int, val T, params TensorData) &Tensor[T] { return tensor[T](val, shape, params) } @@ -80,7 +80,7 @@ pub fn range[T](from int, to int, params TensorData) &Tensor[T] { } // seq returns a Tensor containing values ranging from [0, to) -[inline] +@[inline] pub fn seq[T](n int, params TensorData) &Tensor[T] { return range[T](0, n, params) } diff --git a/src/fun.v b/src/fun.v index ea3d7170..31f18131 100644 --- a/src/fun.v +++ b/src/fun.v @@ -109,7 +109,7 @@ pub fn (t &Tensor[T]) diagonal[T]() &Tensor[T] { // ravel returns a flattened view of an Tensor if possible, // otherwise a flattened copy -[inline] +@[inline] pub fn (t &Tensor[T]) ravel[T]() !&Tensor[T] { return t.reshape([-1]) } diff --git a/src/fun_logical.v b/src/fun_logical.v index f4dc6edf..5810b360 100644 --- a/src/fun_logical.v +++ b/src/fun_logical.v @@ -105,7 +105,7 @@ pub fn (t &Tensor[T]) array_equiv[T](other &Tensor[T]) bool { return true } -[inline] +@[inline] fn handle_equal[T](vals []T, _ []int) bool { mut equal := true for v in vals { @@ -115,7 +115,7 @@ fn handle_equal[T](vals []T, _ []int) bool { } // equal compares two tensors elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) equal[T](other &Tensor[T]) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! @@ -129,7 +129,7 @@ pub fn (t &Tensor[T]) equal[T](other &Tensor[T]) !&Tensor[bool] { } // not_equal compares two tensors elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) not_equal[T](other &Tensor[T]) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! 
@@ -143,7 +143,7 @@ pub fn (t &Tensor[T]) not_equal[T](other &Tensor[T]) !&Tensor[bool] { } // tolerance compares two tensors elementwise with a given tolerance -[inline] +@[inline] pub fn (t &Tensor[T]) tolerance[T](other &Tensor[T], tol T) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! @@ -157,7 +157,7 @@ pub fn (t &Tensor[T]) tolerance[T](other &Tensor[T], tol T) !&Tensor[bool] { } // close compares two tensors elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) close[T](other &Tensor[T]) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! @@ -171,7 +171,7 @@ pub fn (t &Tensor[T]) close[T](other &Tensor[T]) !&Tensor[bool] { } // veryclose compares two tensors elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) veryclose[T](other &Tensor[T]) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! @@ -185,7 +185,7 @@ pub fn (t &Tensor[T]) veryclose[T](other &Tensor[T]) !&Tensor[bool] { } // alike compares two tensors elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) alike[T](other &Tensor[T]) !&Tensor[bool] { // TODO: Implement using nmap mut iters, shape := t.iterators[T]([other])! diff --git a/src/iter.v b/src/iter.v index 2e5084a0..d5e47831 100644 --- a/src/iter.v +++ b/src/iter.v @@ -9,7 +9,7 @@ pub enum IteratorStrategy { // TensorIterator is a struct to hold a Tensors // iteration state while iterating through a Tensor -[heap] +@[heap] pub struct TensorIterator[T] { pub: tensor &Tensor[T] @@ -42,7 +42,7 @@ pub fn (t &Tensor[T]) custom_iterator[T](data IteratorBuildData[T]) &TensorItera // next calls the iteration type for a given iterator // which is either flat or strided and returns a Num containing the current value -[inline] +@[inline] pub fn (mut s TensorIterator[T]) next[T]() ?(T, []int) { if s.iteration >= s.tensor.size() { return none @@ -87,7 +87,7 @@ pub fn (t &Tensor[T]) iterators[T](ts []&Tensor[T]) !(&TensorsIterator[T], []int // next calls the iteration type for a given list of iterators // which is either flat or strided and returns a list of Nums containing the current values -[inline] +@[inline] pub fn (mut its TensorsIterator[T]) next[T]() ?([]T, []int) { mut nums := []T{cap: its.iters.len} mut index := []int{} diff --git a/src/iter_axis.v b/src/iter_axis.v index f76d918e..c0744db2 100644 --- a/src/iter_axis.v +++ b/src/iter_axis.v @@ -2,7 +2,7 @@ module vtl // TensorAxisIterator is the core iterator for axis-wise operations. 
// Stores a copy of the shape and strides reduced along a given axis -[heap] +@[heap] pub struct TensorAxisIterator[T] { pub: tensor &Tensor[T] @@ -54,7 +54,7 @@ pub fn (t &Tensor[T]) axis_with_dims_iterator[T](axis int) &TensorAxisIterator[T // next calls the iteration type for a given iterator // which is either flat or strided and returns a Num containing the current value -[inline] +@[inline] pub fn (mut s TensorAxisIterator[T]) next[T]() ?(T, []int) { if s.iteration >= s.tensor.shape[s.axis] { return none diff --git a/src/lookup.v b/src/lookup.v index fb1c38b5..bcdbefbd 100644 --- a/src/lookup.v +++ b/src/lookup.v @@ -1,14 +1,14 @@ module vtl // get returns a scalar value from a Tensor at the provided index -[inline] +@[inline] pub fn (t &Tensor[T]) get[T](index []int) T { offset := t.offset_index(index) return t.data.get[T](offset) } // get_nth returns a scalar value from a Tensor at the provided index -[inline] +@[inline] pub fn (t &Tensor[T]) get_nth[T](n int) T { index := t.nth_index(n) return t.get[T](index) @@ -16,7 +16,7 @@ pub fn (t &Tensor[T]) get_nth[T](n int) T { // offset_index returns the index to a Tensor's data at // a given index -[inline] +@[inline] pub fn (t &Tensor[T]) offset_index[T](index []int) int { mut offset := 0 for i in 0 .. t.rank() { diff --git a/src/math.v b/src/math.v index c5a291c6..738de97a 100644 --- a/src/math.v +++ b/src/math.v @@ -3,7 +3,7 @@ module vtl import math // abs returns the elementwise abs of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) abs[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { // TODO: Figure out a way to do this without casting to f64 @@ -12,7 +12,7 @@ pub fn (t &Tensor[T]) abs[T]() &Tensor[T] { } // acos returns the elementwise acos of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) acos[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.acos(td(x).f64())) @@ -20,7 +20,7 @@ pub fn (t &Tensor[T]) acos[T]() &Tensor[T] { } // acosh returns the elementwise acosh of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) acosh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.acosh(td(x).f64())) @@ -28,7 +28,7 @@ pub fn (t &Tensor[T]) acosh[T]() &Tensor[T] { } // asin returns the elementwise asin of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) asin[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.asin(td(x).f64())) @@ -36,7 +36,7 @@ pub fn (t &Tensor[T]) asin[T]() &Tensor[T] { } // asinh returns the elementwise asinh of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) asinh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.asinh(td(x).f64())) @@ -44,7 +44,7 @@ pub fn (t &Tensor[T]) asinh[T]() &Tensor[T] { } // atan returns the elementwise atan of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) atan[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.atan(td(x).f64())) @@ -52,7 +52,7 @@ pub fn (t &Tensor[T]) atan[T]() &Tensor[T] { } // atan2 returns the atan2 elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) atan2[T](b &Tensor[T]) !&Tensor[T] { return a.nmap([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -62,7 +62,7 @@ pub fn (a &Tensor[T]) atan2[T](b &Tensor[T]) !&Tensor[T] { } // atanh returns the elementwise atanh of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) atanh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.atanh(td(x).f64())) @@ -70,7 +70,7 @@ pub fn (t &Tensor[T]) atanh[T]() &Tensor[T] { } // cbrt returns 
the elementwise cbrt of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) cbrt[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.cbrt(td(x).f64())) @@ -78,7 +78,7 @@ pub fn (t &Tensor[T]) cbrt[T]() &Tensor[T] { } // ceil returns the elementwise ceil of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) ceil[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.ceil(td(x).f64())) @@ -86,7 +86,7 @@ pub fn (t &Tensor[T]) ceil[T]() &Tensor[T] { } // cos returns the elementwise cos of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) cos[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.cos(td(x).f64())) @@ -94,7 +94,7 @@ pub fn (t &Tensor[T]) cos[T]() &Tensor[T] { } // cosh returns the elementwise cosh of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) cosh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.cosh(td(x).f64())) @@ -102,7 +102,7 @@ pub fn (t &Tensor[T]) cosh[T]() &Tensor[T] { } // cot returns the elementwise cot of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) cot[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.cot(td(x).f64())) @@ -110,7 +110,7 @@ pub fn (t &Tensor[T]) cot[T]() &Tensor[T] { } // degrees returns the elementwise degrees of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) degrees[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.degrees(td(x).f64())) @@ -118,7 +118,7 @@ pub fn (t &Tensor[T]) degrees[T]() &Tensor[T] { } // erf returns the elementwise erf of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) erf[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.erf(td(x).f64())) @@ -126,7 +126,7 @@ pub fn (t &Tensor[T]) erf[T]() &Tensor[T] { } // erfc returns the elementwise erfc of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) erfc[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.erfc(td(x).f64())) @@ -134,7 +134,7 @@ pub fn (t &Tensor[T]) erfc[T]() &Tensor[T] { } // exp returns the elementwise exp of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) exp[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.exp(td(x).f64())) @@ -142,7 +142,7 @@ pub fn (t &Tensor[T]) exp[T]() &Tensor[T] { } // exp2 returns the elementwise exp2 of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) exp2[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.exp2(td(x).f64())) @@ -150,7 +150,7 @@ pub fn (t &Tensor[T]) exp2[T]() &Tensor[T] { } // expm1 returns the elementwise expm1 of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) expm1[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.expm1(td(x).f64())) @@ -158,7 +158,7 @@ pub fn (t &Tensor[T]) expm1[T]() &Tensor[T] { } // f32_bits returns the elementwise f32_bits of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) f32_bits[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.f32_bits(td(x).f32())) @@ -166,7 +166,7 @@ pub fn (t &Tensor[T]) f32_bits[T]() &Tensor[T] { } // f32_from_bits returns the elementwise f32_from_bits of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) f32_from_bits[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.f32_from_bits(td(x).u32())) @@ -174,7 +174,7 @@ pub fn (t &Tensor[T]) f32_from_bits[T]() &Tensor[T] { } // f64_bits returns the elementwise f64_bits of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) f64_bits[T]() &Tensor[T] { 
return t.map(fn [T](x T, _ []int) T { return cast[T](math.f64_bits(td(x).f64())) @@ -182,7 +182,7 @@ pub fn (t &Tensor[T]) f64_bits[T]() &Tensor[T] { } // f64_from_bits returns the elementwise f64_from_bits of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) f64_from_bits[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.f64_from_bits(td(x).u64())) @@ -190,7 +190,7 @@ pub fn (t &Tensor[T]) f64_from_bits[T]() &Tensor[T] { } // factorial returns the elementwise factorial of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) factorial[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.factorial(td(x).f64())) @@ -198,7 +198,7 @@ pub fn (t &Tensor[T]) factorial[T]() &Tensor[T] { } // floor returns the elementwise floor of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) floor[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.floor(td(x).f64())) @@ -206,7 +206,7 @@ pub fn (t &Tensor[T]) floor[T]() &Tensor[T] { } // fmod returns the fmod elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) fmod[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -216,7 +216,7 @@ pub fn (a &Tensor[T]) fmod[T](b &Tensor[T]) !&Tensor[T] { } // gamma returns the elementwise gamma of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) gamma[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.gamma(td(x).f64())) @@ -224,7 +224,7 @@ pub fn (t &Tensor[T]) gamma[T]() &Tensor[T] { } // gcd returns the gcd elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) gcd[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -234,7 +234,7 @@ pub fn (a &Tensor[T]) gcd[T](b &Tensor[T]) !&Tensor[T] { } // hypot returns the hypot elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) hypot[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -244,7 +244,7 @@ pub fn (a &Tensor[T]) hypot[T](b &Tensor[T]) !&Tensor[T] { } // lcm returns the lcm elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) lcm[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -254,7 +254,7 @@ pub fn (a &Tensor[T]) lcm[T](b &Tensor[T]) !&Tensor[T] { } // log returns the elementwise log of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.log(td(x).f64())) @@ -262,7 +262,7 @@ pub fn (t &Tensor[T]) log[T]() &Tensor[T] { } // log10 returns the elementwise log10 of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log10[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.log10(td(x).f64())) @@ -270,7 +270,7 @@ pub fn (t &Tensor[T]) log10[T]() &Tensor[T] { } // log1p returns the elementwise log1p of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log1p[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.log1p(td(x).f64())) @@ -278,7 +278,7 @@ pub fn (t &Tensor[T]) log1p[T]() &Tensor[T] { } // log2 returns the elementwise log2 of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log2[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.log2(td(x).f64())) @@ -286,7 +286,7 @@ pub fn (t &Tensor[T]) log2[T]() &Tensor[T] { } // log_factorial returns the elementwise log_factorial of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log_factorial[T]() &Tensor[T] { return 
t.map(fn [T](x T, _ []int) T { return cast[T](math.log_factorial(td(x).f64())) @@ -294,7 +294,7 @@ pub fn (t &Tensor[T]) log_factorial[T]() &Tensor[T] { } // log_gamma returns the elementwise log_gamma of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) log_gamma[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.log_gamma(td(x).f64())) @@ -302,7 +302,7 @@ pub fn (t &Tensor[T]) log_gamma[T]() &Tensor[T] { } // log_n returns the log_n elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) log_n[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -312,7 +312,7 @@ pub fn (a &Tensor[T]) log_n[T](b &Tensor[T]) !&Tensor[T] { } // max returns the max elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) max[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { return math.max(xs[0], xs[1]) @@ -320,7 +320,7 @@ pub fn (a &Tensor[T]) max[T](b &Tensor[T]) !&Tensor[T] { } // min returns the min elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) min[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { return math.min(xs[0], xs[1]) @@ -328,7 +328,7 @@ pub fn (a &Tensor[T]) min[T](b &Tensor[T]) !&Tensor[T] { } // nextafter returns the nextafter elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) nextafter[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -338,7 +338,7 @@ pub fn (a &Tensor[T]) nextafter[T](b &Tensor[T]) !&Tensor[T] { } // nextafter32 returns the nextafter32 elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) nextafter32[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -348,7 +348,7 @@ pub fn (a &Tensor[T]) nextafter32[T](b &Tensor[T]) !&Tensor[T] { } // pow returns the pow elementwise of two tensors -[inline] +@[inline] pub fn (a &Tensor[T]) pow[T](b &Tensor[T]) !&Tensor[T] { return a.nmap[T]([b], fn [T](xs []T, _ []int) T { x := xs[0] @@ -358,7 +358,7 @@ pub fn (a &Tensor[T]) pow[T](b &Tensor[T]) !&Tensor[T] { } // pow10 returns the elementwise pow10 of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) pow10[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.pow10(td(x).int())) @@ -366,7 +366,7 @@ pub fn (t &Tensor[T]) pow10[T]() &Tensor[T] { } // radians returns the elementwise deg2rad of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) radians[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.radians(td(x).f64())) @@ -374,7 +374,7 @@ pub fn (t &Tensor[T]) radians[T]() &Tensor[T] { } // round rounds elements of an tensor elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) round[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.round(td(x).f64())) @@ -382,7 +382,7 @@ pub fn (t &Tensor[T]) round[T]() &Tensor[T] { } // round_to_even round_to_evens elements of an tensor elementwise -[inline] +@[inline] pub fn (t &Tensor[T]) round_to_even[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.round_to_even(td(x).f64())) @@ -390,7 +390,7 @@ pub fn (t &Tensor[T]) round_to_even[T]() &Tensor[T] { } // sin returns the elementwise sin of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) sin[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.sin(td(x).f64())) @@ -398,7 +398,7 @@ pub fn (t &Tensor[T]) sin[T]() &Tensor[T] { } // sinh returns the elementwise sinh of an 
tensor -[inline] +@[inline] pub fn (t &Tensor[T]) sinh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.sinh(td(x).f64())) @@ -406,7 +406,7 @@ pub fn (t &Tensor[T]) sinh[T]() &Tensor[T] { } // sqrt returns the elementwise square root of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) sqrt[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.sqrt(td(x).f64())) @@ -414,7 +414,7 @@ pub fn (t &Tensor[T]) sqrt[T]() &Tensor[T] { } // tan returns the elementwise tan of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) tan[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.tan(td(x).f64())) @@ -422,7 +422,7 @@ pub fn (t &Tensor[T]) tan[T]() &Tensor[T] { } // tanh returns the elementwise tanh of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) tanh[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.tanh(td(x).f64())) @@ -430,7 +430,7 @@ pub fn (t &Tensor[T]) tanh[T]() &Tensor[T] { } // trunc returns the elementwise trunc of an tensor -[inline] +@[inline] pub fn (t &Tensor[T]) trunc[T]() &Tensor[T] { return t.map(fn [T](x T, _ []int) T { return cast[T](math.trunc(td(x).f64())) diff --git a/src/math_op.v b/src/math_op.v index 62266aa5..fede2783 100644 --- a/src/math_op.v +++ b/src/math_op.v @@ -1,7 +1,7 @@ module vtl // add adds two tensors elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) add[T](b &Tensor[T]) !&Tensor[T] { return a.nmap([b], fn [T](xs []T, _ []int) T { a := xs[0] @@ -17,7 +17,7 @@ pub fn (a &Tensor[T]) add[T](b &Tensor[T]) !&Tensor[T] { } // add adds a scalar to a tensor elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) add_scalar[T](scalar T) !&Tensor[T] { return a.map(fn [scalar] [T](x T, _ []int) T { $if T is bool { @@ -31,7 +31,7 @@ pub fn (a &Tensor[T]) add_scalar[T](scalar T) !&Tensor[T] { } // subtract subtracts two tensors elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) subtract[T](b &Tensor[T]) !&Tensor[T] { return a.nmap([b], fn [T](xs []T, _ []int) T { a := xs[0] @@ -47,7 +47,7 @@ pub fn (a &Tensor[T]) subtract[T](b &Tensor[T]) !&Tensor[T] { } // subtract subtracts a scalar to a tensor elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) subtract_scalar[T](scalar T) !&Tensor[T] { return a.map(fn [scalar] [T](x T, _ []int) T { $if T is bool { @@ -61,7 +61,7 @@ pub fn (a &Tensor[T]) subtract_scalar[T](scalar T) !&Tensor[T] { } // divide divides two tensors elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) divide[T](b &Tensor[T]) !&Tensor[T] { return a.nmap([b], fn [T](xs []T, _ []int) T { a := xs[0] @@ -75,7 +75,7 @@ pub fn (a &Tensor[T]) divide[T](b &Tensor[T]) !&Tensor[T] { } // divide divides a scalar to a tensor elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) divide_scalar[T](scalar T) !&Tensor[T] { return a.map(fn [scalar] [T](x T, _ []int) T { $if T is bool || T is string { @@ -87,7 +87,7 @@ pub fn (a &Tensor[T]) divide_scalar[T](scalar T) !&Tensor[T] { } // multiply multiplies two tensors elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) multiply[T](b &Tensor[T]) !&Tensor[T] { return a.nmap([b], fn [T](xs []T, _ []int) T { a := xs[0] @@ -101,7 +101,7 @@ pub fn (a &Tensor[T]) multiply[T](b &Tensor[T]) !&Tensor[T] { } // multiply multiplies a scalar to a tensor elementwise -[inline] +@[inline] pub fn (a &Tensor[T]) multiply_scalar[T](scalar T) !&Tensor[T] { return a.map(fn [scalar] [T](x T, _ []int) T { $if T is bool || T is string { diff --git a/src/rand.v b/src/rand.v index 85a10e58..bbdd1fc5 100644 --- a/src/rand.v +++ 
b/src/rand.v @@ -43,7 +43,7 @@ pub fn exponential[T](lambda f64, shape []int, params TensorData) &Tensor[T] { } // NormalTensorData is the data for a normal distribution. -[params] +@[params] pub struct NormalTensorData { TensorData config.NormalConfigStruct diff --git a/src/stack.v b/src/stack.v index 5eae130f..7f8a4117 100644 --- a/src/stack.v +++ b/src/stack.v @@ -1,6 +1,6 @@ module vtl -[params] +@[params] pub struct AxisData { axis int } diff --git a/src/tensor.v b/src/tensor.v index f412ddbe..de6456fa 100644 --- a/src/tensor.v +++ b/src/tensor.v @@ -9,7 +9,7 @@ pub enum MemoryFormat { } // Tensor is the main structure defined by VTL to manage N Dimensional data -[heap] +@[heap] pub struct Tensor[T] { pub mut: data &storage.CpuStorage[T] @@ -20,13 +20,13 @@ pub mut: } // cpu returns a Tensor from a Tensor -[inline] +@[inline] pub fn (t &Tensor[T]) cpu() !&Tensor[T] { return t } // str returns the string representation of a Tensor -[inline] +@[inline] pub fn (t &Tensor[T]) str() string { return tensor_str[T](t, ', ', '') or { '' } } @@ -42,26 +42,26 @@ pub fn (t &Tensor[T]) size() int { } // is_matrix returns if a Tensor is an nxm matrix or not -[inline] +@[inline] pub fn (t &Tensor[T]) is_matrix() bool { return t.rank() == 2 } // is_square_matrix returns if a Tensor is a square matrix or not -[inline] +@[inline] pub fn (t &Tensor[T]) is_square_matrix() bool { return t.rank() == 2 && t.shape[0] == t.shape[1] } // is_vector returns if a Tensor is a 1D vector or not -[inline] +@[inline] pub fn (t &Tensor[T]) is_vector() bool { return t.rank() == 1 } // is_row_major returns if a Tensor is supposed to store its data in Row-Major // order -[inline] +@[inline] pub fn (t &Tensor[T]) is_row_major() bool { // TODO: we need to ensure that t.memory is the source of truth return t.memory == .row_major @@ -69,7 +69,7 @@ pub fn (t &Tensor[T]) is_row_major() bool { // is_col_major returns if a Tensor is supposed to store its data in Col-Major // order -[inline] +@[inline] pub fn (t &Tensor[T]) is_col_major() bool { // TODO: we need to ensure that t.memory is the source of truth return t.memory == .col_major @@ -77,21 +77,21 @@ pub fn (t &Tensor[T]) is_col_major() bool { // is_row_major_contiguous verifies if a Tensor stores its data in Row-Major // order -[inline] +@[inline] pub fn (t &Tensor[T]) is_row_major_contiguous() bool { return is_row_major_contiguous(t.shape, t.strides, t.rank()) } // is_col_major_contiguous verifies if a Tensor stores its data in Col-Major // order -[inline] +@[inline] pub fn (t &Tensor[T]) is_col_major_contiguous() bool { return is_col_major_contiguous(t.shape, t.strides, t.rank()) } // is_contiguous verifies that a Tensor is contiguous independent of // memory layout -[inline] +@[inline] pub fn (t &Tensor[T]) is_contiguous() bool { return t.is_row_major_contiguous() || t.is_col_major_contiguous() } @@ -110,7 +110,7 @@ pub fn (t &Tensor[T]) to_array() []T { // copy returns a copy of a Tensor with a particular memory // layout, either row_major-contiguous or col_major-contiguous -[inline] +@[inline] pub fn (t &Tensor[T]) copy(memory MemoryFormat) &Tensor[T] { strides := strides_from_shape(t.shape, memory) size := size_from_shape(t.shape) @@ -127,7 +127,7 @@ pub fn (t &Tensor[T]) copy(memory MemoryFormat) &Tensor[T] { // view returns a view of a Tensor, identical to the // parent but not owning its own data -[inline] +@[inline] pub fn (t &Tensor[T]) view() &Tensor[T] { return &Tensor[T]{ data: t.data diff --git a/src/tensor_any_d_vcl.v b/src/tensor_any_d_vcl.v index 75404649..e4f7c998 100644 ---
a/src/tensor_any_d_vcl.v +++ b/src/tensor_any_d_vcl.v @@ -2,7 +2,7 @@ module vtl // AnyTensor is an interface that allows for any tensor to be used in the vtl library pub interface AnyTensor[T] { - shape []int + shape []int strides []int cpu() &Tensor[T] vcl() !&VclTensor[T] diff --git a/src/tensor_any_notd_vcl.v b/src/tensor_any_notd_vcl.v index fa91d3eb..63b85230 100644 --- a/src/tensor_any_notd_vcl.v +++ b/src/tensor_any_notd_vcl.v @@ -2,7 +2,7 @@ module vtl // AnyTensor is an interface that allows for any tensor to be used in the vtl library pub interface AnyTensor[T] { - shape []int + shape []int strides []int cpu() &Tensor[T] vcl() !&Tensor[T] diff --git a/src/tensor_vcl_d_vcl.v b/src/tensor_vcl_d_vcl.v index d33fb5f9..ff008d85 100644 --- a/src/tensor_vcl_d_vcl.v +++ b/src/tensor_vcl_d_vcl.v @@ -3,7 +3,7 @@ module vtl import vtl.storage // VclTensor is the main structure defined by VTL to manage N Dimensional data -[heap] +@[heap] pub struct VclTensor[T] { pub mut: data &storage.VclStorage[T] @@ -39,7 +39,7 @@ pub fn (t &VclTensor[T]) cpu() !&Tensor[T] { } // vcl returns a VclTensor from a VclTensor -[inline] +@[inline] pub fn (t &VclTensor[T]) vcl() !&VclTensor[T] { return t } @@ -66,26 +66,26 @@ pub fn (t &VclTensor[T]) size() int { } // is_matrix returns if a VclTensor is an nxm matrix or not -[inline] +@[inline] pub fn (t &VclTensor[T]) is_matrix() bool { return t.rank() == 2 } // is_square_matrix returns if a VclTensor is a square matrix or not -[inline] +@[inline] pub fn (t &VclTensor[T]) is_square_matrix() bool { return t.rank() == 2 && t.shape[0] == t.shape[1] } // is_vector returns if a VclTensor is a 1D vector or not -[inline] +@[inline] pub fn (t &VclTensor[T]) is_vector() bool { return t.rank() == 1 } // is_row_major returns if a VclTensor is supposed to store its data in Row-Major // order -[inline] +@[inline] pub fn (t &VclTensor[T]) is_row_major() bool { // TODO: we need to ensure that t.memory is the source of truth return t.memory == .row_major @@ -93,7 +93,7 @@ pub fn (t &VclTensor[T]) is_row_major() bool { // is_col_major returns if a VclTensor is supposed to store its data in Col-Major // order -[inline] +@[inline] pub fn (t &VclTensor[T]) is_col_major() bool { // TODO: we need to ensure that t.memory is the source of truth return t.memory == .col_major @@ -101,21 +101,21 @@ pub fn (t &VclTensor[T]) is_col_major() bool { // is_row_major_contiguous verifies if a VclTensor stores its data in Row-Major // order -[inline] +@[inline] pub fn (t &VclTensor[T]) is_row_major_contiguous() bool { return is_row_major_contiguous(t.shape, t.strides, t.rank()) } // is_col_major_contiguous verifies if a VclTensor stores its data in Col-Major // order -[inline] +@[inline] pub fn (t &VclTensor[T]) is_col_major_contiguous() bool { return is_col_major_contiguous(t.shape, t.strides, t.rank()) } // is_contiguous verifies that a VclTensor is contiguous independent of // memory layout -[inline] +@[inline] pub fn (t &VclTensor[T]) is_contiguous() bool { return t.is_row_major_contiguous() || t.is_col_major_contiguous() } diff --git a/src/tensor_vcl_notd_vcl.v b/src/tensor_vcl_notd_vcl.v index dcfa61f0..64176c20 100644 --- a/src/tensor_vcl_notd_vcl.v +++ b/src/tensor_vcl_notd_vcl.v @@ -1,6 +1,6 @@ module vtl -[params] +@[params] pub struct VclParams {} // vcl returns a VclTensor from a Tensor diff --git a/src/util.v b/src/util.v index 3c28282e..120dea90 100644 --- a/src/util.v +++ b/src/util.v @@ -3,7 +3,7 @@ module vtl import arrays // assert_square_matrix returns an error if the given tensor is not a square matrix
-[inline] +@[inline] pub fn (t &Tensor[T]) assert_square_matrix[T]() ! { if !t.is_square_matrix() { return error('Matrix is not square') @@ -11,7 +11,7 @@ pub fn (t &Tensor[T]) assert_square_matrix[T]() ! { } // assert_matrix returns an error if the given tensor is not a matrix -[inline] +@[inline] pub fn (t &Tensor[T]) assert_matrix[T]() ! { if !t.is_matrix() { return error('Tensor is not two-dimensional') @@ -28,7 +28,7 @@ fn irange(start int, stop int) []int { } // assert_rank ensures that a Tensor has a given rank -[inline] +@[inline] fn (t &Tensor[T]) assert_rank[T](n int) ! { if n != t.rank() { return error('Bad number of dimensions') @@ -36,7 +36,7 @@ fn (t &Tensor[T]) assert_rank[T](n int) ! { } // assert_min_rank ensures that a Tensor has at least a given rank -[inline] +@[inline] fn (t &Tensor[T]) assert_min_rank[T](n int) ! { if n > t.rank() { return error('Bad number of dimensions') @@ -44,7 +44,7 @@ fn (t &Tensor[T]) assert_min_rank[T](n int) ! { } // ensure_memory sets a correct memory layout to a given tensor -[inline] +@[inline] pub fn (mut t Tensor[T]) ensure_memory[T]() { if t.is_col_major() { if !t.is_col_major_contiguous() { @@ -80,7 +80,7 @@ fn assert_shape_off_axis[T](ts []&Tensor[T], axis int, shape []int) ![]int { // assert_shape ensures that the shapes of Tensors match // for each tensor given list of tensors -[inline] +@[inline] fn assert_shape[T](shape []int, ts []&Tensor[T]) ! { for t in ts { if shape != t.shape { @@ -245,7 +245,7 @@ fn pad_with_max(pad []int, shape []int, ndims int) []int { // iarray_min returns the minimum value of a given array of int values // the use of arrays.min gives us an optimized version of this function -[inline] +@[inline] fn iarray_min(arr []int) int { return arrays.min[int](arr) or { 0 } } diff --git a/src/util_d_vcl.v b/src/util_d_vcl.v index d168846b..6165da8b 100644 --- a/src/util_d_vcl.v +++ b/src/util_d_vcl.v @@ -1,7 +1,7 @@ module vtl // assert_square_matrix returns an error if the given tensor is not a square matrix -[inline] +@[inline] fn (t &VclTensor[T]) assert_square_matrix[T]() ! { if !t.is_square_matrix() { return error('Matrix is not square') @@ -9,7 +9,7 @@ fn (t &VclTensor[T]) assert_square_matrix[T]() ! { } // assert_matrix returns an error if the given tensor is not a matrix -[inline] +@[inline] fn (t &VclTensor[T]) assert_matrix[T]() ! { if !t.is_matrix() { return error('Tensor is not two-dimensional') @@ -17,7 +17,7 @@ fn (t &VclTensor[T]) assert_matrix[T]() ! { } // assert_rank ensures that a VclTensor has a given rank -[inline] +@[inline] fn (t &VclTensor[T]) assert_rank[T](n int) ! { if n != t.rank() { return error('Bad number of dimensions') @@ -25,7 +25,7 @@ fn (t &VclTensor[T]) assert_rank[T](n int) ! { } // assert_min_rank ensures that a VclTensor has at least a given rank -[inline] +@[inline] fn (t &VclTensor[T]) assert_min_rank[T](n int) ! { if n > t.rank() { return error('Bad number of dimensions') @@ -33,7 +33,7 @@ fn (t &VclTensor[T]) assert_min_rank[T](n int) !
{ } // ensure_memory sets a correct memory layout to a given tensor -[inline] +@[inline] pub fn (mut t VclTensor[T]) ensure_memory[T]() { if t.is_col_major() { if !t.is_col_major_contiguous() { diff --git a/stats/stats.v b/stats/stats.v index c9cd1112..d921bd54 100644 --- a/stats/stats.v +++ b/stats/stats.v @@ -198,7 +198,7 @@ pub fn rms[T](t &vtl.Tensor[T]) T { // Population Variance of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn population_variance[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -226,7 +226,7 @@ pub fn population_variance_mean[T](t &vtl.Tensor[T], provided_mean T) T { // Sample Variance of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn sample_variance[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -254,7 +254,7 @@ pub fn sample_variance_mean[T](t &vtl.Tensor[T], provided_mean T) T { // Population Standard Deviation of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn population_stddev[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -268,7 +268,7 @@ pub fn population_stddev[T](t &vtl.Tensor[T]) T { // Population Standard Deviation of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn population_stddev_mean[T](t &vtl.Tensor[T], mean T) T { if t.size == 0 { return vtl.cast[T](0) @@ -281,7 +281,7 @@ pub fn population_stddev_mean[T](t &vtl.Tensor[T], mean T) T { // Sample Standard Deviation of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn sample_stddev[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -295,7 +295,7 @@ pub fn sample_stddev[T](t &vtl.Tensor[T]) T { // Sample Standard Deviation of the given input array // Based on // https://www.mathsisfun.com/data/standard-deviation.html -[inline] +@[inline] pub fn sample_stddev_mean[T](t &vtl.Tensor[T], mean T) T { if t.size == 0 { return vtl.cast[T](0) @@ -308,7 +308,7 @@ pub fn sample_stddev_mean[T](t &vtl.Tensor[T], mean T) T { // Mean Absolute Deviation of the given input array // Based on // https://en.wikipedia.org/wiki/Average_absolute_deviation -[inline] +@[inline] pub fn absdev[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -333,7 +333,7 @@ pub fn absdev_mean[T](t &vtl.Tensor[T], provided_mean T) T { } // Sum of squares -[inline] +@[inline] pub fn tss[T](t &vtl.Tensor[T]) T { if t.size == 0 { return vtl.cast[T](0) @@ -421,7 +421,7 @@ pub fn range[T](t &vtl.Tensor[T]) T { return max - min } -[inline] +@[inline] pub fn covariance[T](a &vtl.Tensor[T], b &vtl.Tensor[T]) T { mean1 := mean[T](a) mean2 := mean[T](b) @@ -444,7 +444,7 @@ pub fn covariance_mean[T](a &vtl.Tensor[T], b &vtl.Tensor[T], mean1 T, mean2 T) return cov / vtl.cast[T](n) } -[inline] +@[inline] pub fn lag1_autocorrelation[T](t &vtl.Tensor[T]) T { data_mean := mean[T](t) return lag1_autocorrelation_mean(t, data_mean) @@ -468,7 +468,7 @@ pub fn lag1_autocorrelation_mean[T](t &vtl.Tensor[T], provided_mean T) T { return lag1_autocorrelation / lag1_denominator } -[inline] +@[inline] pub fn kurtosis[T](t &vtl.Tensor[T]) T { data_mean := mean[T](t) data_sd := stddev[T](t, data_mean) @@ -491,7 +491,7 @@ pub fn kurtosis_mean_stddev[T](t &vtl.Tensor[T], mean T, sd T) T { return kurtosis / math.pow(sd, 
vtl.cast[T](4)) } -[inline] +@[inline] pub fn skew[T](t &vtl.Tensor[T]) T { data_mean := mean[T](t) data_sd := stddev[T](t, data_mean) diff --git a/storage/cpu.v b/storage/cpu.v index 2e3d8631..2a395129 100644 --- a/storage/cpu.v +++ b/storage/cpu.v @@ -7,7 +7,7 @@ pub const ( ) // CpuStorage -[heap] +@[heap] pub struct CpuStorage[T] { pub mut: data []T @@ -28,13 +28,13 @@ pub fn from_array[T](arr []T) &CpuStorage[T] { } // Private function. Used to implement Storage operator -[inline] +@[inline] pub fn (s &CpuStorage[T]) get[T](i int) T { return s.data[i] } // Private function. Used to implement assigment to the Storage element -[inline] +@[inline] pub fn (mut s CpuStorage[T]) set[T](i int, val T) { s.data[i] = val } @@ -47,7 +47,7 @@ pub fn (mut s CpuStorage[T]) fill[T](val T) { } // clone returns an independent copy of a given Storage -[inline] +@[inline] pub fn (s &CpuStorage[T]) clone[T]() &CpuStorage[T] { return &CpuStorage[T]{ data: s.data.clone() @@ -55,7 +55,7 @@ pub fn (s &CpuStorage[T]) clone[T]() &CpuStorage[T] { } // like returns an independent copy of a given Storage -[inline] +@[inline] pub fn (s &CpuStorage[T]) like[T]() &CpuStorage[T] { return &CpuStorage[T]{ data: []T{len: s.data.len, cap: s.data.cap} @@ -63,7 +63,7 @@ pub fn (s &CpuStorage[T]) like[T]() &CpuStorage[T] { } // like_with_len returns an independent copy of a given Storage -[inline] +@[inline] pub fn (s &CpuStorage[T]) like_with_len[T](len int) &CpuStorage[T] { mut capacity := if s.data.cap < len { len } else { s.data.cap } return &CpuStorage[T]{ @@ -77,12 +77,12 @@ pub fn (s &CpuStorage[T]) offset[T](start int) &CpuStorage[T] { } } -[inline] +@[inline] pub fn (s &CpuStorage[T]) to_array[T]() []T { return s.data.clone() } -[inline] +@[inline] fn imax(a int, b int) int { return if a > b { a } else { b } } diff --git a/storage/vcl_d_vcl.v b/storage/vcl_d_vcl.v index 2bd7785d..99af7ef0 100644 --- a/storage/vcl_d_vcl.v +++ b/storage/vcl_d_vcl.v @@ -2,13 +2,13 @@ module storage import vsl.vcl -[params] +@[params] pub struct VclStorageParams { device &vcl.Device = unsafe { nil } } // VclStorage -[heap] +@[heap] pub struct VclStorage[T] { pub mut: data vcl.Vector[T] @@ -39,12 +39,12 @@ pub fn (storage &VclStorage[T]) cpu() !&CpuStorage[T] { } } -[inline] +@[inline] pub fn (storage &VclStorage[T]) to_array[T]() ![]T { return storage.data.data() } -[inline] +@[inline] pub fn (storage &VclStorage[T]) release() ! { return storage.data.release() }
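
Note on the change itself: every hunk in this patch performs the same mechanical migration, replacing V's old attribute spelling ([heap], [params], [inline]) with the current @[attr] form, which V adopted so that attributes can no longer be confused with array literals or index expressions. As a minimal sketch of the two spellings (the struct and function below are illustrative examples only, not code from this patch):

// Old attribute spelling (now deprecated):
//   [heap]
//   [inline]

// Current spelling, as applied throughout this patch:
@[heap]
pub struct Example {
pub mut:
	n int
}

@[inline]
fn double(x int) int {
	return 2 * x
}

On a recent V toolchain, running v fmt -w . at the module root should rewrite the old spelling automatically, which is presumably how a sweep of this size was produced. The few remaining hunks (the AnyTensor and Optimizer interfaces) only adjust field alignment and do not change behavior.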