Passed fmt to match latest V syntax format
ulises-jeremias committed Nov 20, 2023
1 parent de6bfba commit e191e63
Showing 36 changed files with 195 additions and 195 deletions.
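The change throughout is V's attribute syntax: attributes formerly written as [heap], [params], or [inline] now take an @ prefix (@[heap], @[params], @[inline]), which is what recent versions of v fmt emit. Below is a minimal sketch of the new form; the struct and function names are illustrative only and do not come from this repository.

@[heap]
struct Cache {
mut:
	capacity int
	hits     int
}

@[params]
struct CacheParams {
	capacity int = 8
}

// @[params] lets callers pass CacheParams fields as named trailing arguments;
// @[inline] is a hint to inline this small constructor.
@[inline]
fn new_cache(params CacheParams) &Cache {
	return &Cache{
		capacity: params.capacity
	}
}

fn main() {
	cache := new_cache(capacity: 4)
	println(cache.capacity)
}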
4 changes: 2 additions & 2 deletions autograd/context.v
@@ -6,7 +6,7 @@ import vtl
// a number of operations. Variables that interact with each
// other must belong to the same context, or state will be
// lost while tracking operations done.
-[heap]
+@[heap]
pub struct Context[T] {
pub mut:
// A list of all variables present in an operation.
@@ -45,7 +45,7 @@ pub fn (mut ctx Context[T]) pop[T]() !&Node[T] {
return ctx.nodes.pop()
}

-[params]
+@[params]
pub struct ContextVariableData {
requires_grad bool = true
}
2 changes: 1 addition & 1 deletion autograd/node.v
@@ -3,7 +3,7 @@ module autograd
// Node is a member of a computational graph that contains
// a reference to a gate, as well as the parents of the operation
// and the payload that resulted from the operation.
-[heap]
+@[heap]
pub struct Node[T] {
pub:
// A Gate containing a backwards and cache function for
2 changes: 1 addition & 1 deletion autograd/payload.v
@@ -3,7 +3,7 @@ module autograd
// Payload is a simple wrapper around a Variable. It
// is only abstracted out to be a bit more explicit that
// it is being passed around through an operation
-[heap]
+@[heap]
pub struct Payload[T] {
pub:
// Contents of the paylod
4 changes: 2 additions & 2 deletions autograd/variable.v
@@ -9,7 +9,7 @@ import vtl
// This is the fundamental object used in automatic
// differentiation, as well as the neural network aspects
// of VTL
-[heap]
+@[heap]
pub struct Variable[T] {
pub mut:
// The value of the Variable. This should not be edited outside
@@ -30,7 +30,7 @@ pub mut:
requires_grad bool
}

-[params]
+@[params]
pub struct VariableData {
requires_grad bool = true
}
4 changes: 2 additions & 2 deletions datasets/loader.v
@@ -13,7 +13,7 @@ fn get_cache_dir(subdir ...string) string {
return os.join_path(cache_dir, ...subdir)
}

-[params]
+@[params]
struct RawDownload {
url string
target string
@@ -39,7 +39,7 @@ fn load_from_url(data RawDownload) ! {
http.download_file(data.url, cache_file_path)!
}

-[params]
+@[params]
struct DatasetDownload {
dataset string
baseurl string
12 changes: 6 additions & 6 deletions ml/metrics/common_error_functions.v
@@ -4,18 +4,18 @@ import math
import vtl
import vtl.stats

-[inline]
+@[inline]
pub fn squared_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] {
diff := y.subtract(y_true)!
return diff.multiply(diff)
}

-[inline]
+@[inline]
pub fn mean_squared_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T {
return stats.mean[T](squared_error[T](y, y_true)!)
}

-[inline]
+@[inline]
pub fn relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] {
return y.nmap([y_true], fn [T](vals []T, i []int) T {
denom := math.max(math.abs(vals[1]), math.abs(vals[0]))
@@ -26,17 +26,17 @@ pub fn relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T
})
}

-[inline]
+@[inline]
pub fn mean_relative_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T {
return stats.mean[T](relative_error[T](y, y_true)!)
}

-[inline]
+@[inline]
pub fn absolute_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !&vtl.Tensor[T] {
return y_true.subtract(y)!.abs()
}

-[inline]
+@[inline]
pub fn mean_absolute_error[T](y &vtl.Tensor[T], y_true &vtl.Tensor[T]) !T {
return stats.mean[T](absolute_error[T](y, y_true)!)
}
24 changes: 12 additions & 12 deletions nn/internal/activation.v
@@ -4,37 +4,37 @@ import math
import vtl

// tanh squashes a real-valued number to the range [-1, 1]
-[inline]
+@[inline]
pub fn tanh[T](x &vtl.Tensor[T]) &vtl.Tensor[T] {
return vtl.tanh(x)
}

// deriv_tanh computes the derivative of tanh
-[inline]
+@[inline]
pub fn deriv_tanh[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] {
return gradient.nmap([cached], fn [T](vals []T, i []int) T {
return vals[0] * (vtl.cast[T](1) - vals[1] * vals[1])
})
}

// sigmoid takes a real-valued number and squashes it to the range [0, 1]
-[inline]
+@[inline]
pub fn sigmoid[T](x &vtl.Tensor[T]) &vtl.Tensor[T] {
return x.map(fn [T](val T, i []int) T {
return vtl.cast[T](1) / vtl.cast[T](1) + vtl.cast[T](math.exp(vtl.cast[f64](val)))
})
}

// deriv_sigmoid computes the derivative of sigmoid
-[inline]
+@[inline]
pub fn deriv_sigmoid[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] {
return gradient.nmap([cached], fn [T](vals []T, i []int) T {
return vals[0] * (vtl.cast[T](1) - vals[0])
})
}

// relu activation function
-[inline]
+@[inline]
pub fn relu[T](x &vtl.Tensor[T]) &vtl.Tensor[T] {
return x.map(fn [T](val T, i []int) T {
if val < 0 {
@@ -45,7 +45,7 @@ pub fn relu[T](x &vtl.Tensor[T]) &vtl.Tensor[T] {
}

// deriv_relu computes the derivate of relu
-[inline]
+@[inline]
pub fn deriv_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T] {
return gradient.nmap([cached], fn [T](vals []T, i []int) T {
if vals[0] < 0 {
@@ -56,7 +56,7 @@ pub fn deriv_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tenso
}

// leaky_relu activation function
-[inline]
+@[inline]
pub fn leaky_relu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] {
return x.map(fn [alpha] [T](val T, i []int) T {
if val < 0 {
@@ -67,7 +67,7 @@ pub fn leaky_relu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] {
}

// deriv_leaky_relu computes the derivative of leaky_relu
-[inline]
+@[inline]
pub fn deriv_leaky_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T] {
return gradient.nmap([cached], fn [alpha] [T](vals []T, i []int) T {
if vals[0] < 0 {
@@ -78,7 +78,7 @@ pub fn deriv_leaky_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha
}

// elu activation function
-[inline]
+@[inline]
pub fn elu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] {
return x.map(fn [alpha] [T](val T, i []int) T {
if val < 0 {
@@ -89,7 +89,7 @@ pub fn elu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T] {
}

// deriv_elu computes the derivative of elu
-[inline]
+@[inline]
pub fn deriv_elu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T] {
return gradient.nmap([cached], fn [alpha] [T](vals []T, i []int) T {
if vals[0] < 0 {
@@ -101,7 +101,7 @@ pub fn deriv_elu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&v

// sigmoid_cross_entropy computes the sigmoid cross entropy between
// the labels and the predictions
-[inline]
+@[inline]
pub fn sigmoid_cross_entropy[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T] {
sum := input.nreduce([target], vtl.cast[T](0), fn [T](acc T, vals []T, i []int) T {
next := -(vals[1] * vtl.cast[T](math.max(f64(0), f64(vals[0])))) - vtl.cast[T](math.log(
@@ -114,7 +114,7 @@ pub fn sigmoid_cross_entropy[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&v
}

// mse squared error between the labels and the predictions
-[inline]
+@[inline]
pub fn mse[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T] {
sum := input.nreduce([target], vtl.cast[T](0), fn [T](acc T, vals []T, i []int) T {
next := vtl.cast[T](math.pow(f64(vals[0] - vals[1]), 2.0))
2 changes: 1 addition & 1 deletion nn/layers/dropout.v
@@ -6,7 +6,7 @@ import vtl.nn.internal
import vtl.nn.gates.layers
import vtl.nn.types

-[params]
+@[params]
pub struct DropoutLayerConfig {
prob f64 = 0.5
}
2 changes: 1 addition & 1 deletion nn/layers/elu.v
@@ -6,7 +6,7 @@ import vtl.nn.internal
import vtl.nn.gates.activation
import vtl.nn.types

-[params]
+@[params]
pub struct EluLayerConfig {
alpha f64 = 0.01
}
2 changes: 1 addition & 1 deletion nn/optimizers/adam.v
@@ -18,7 +18,7 @@ pub mut:
second_moments []&vtl.Tensor[T]
}

-[params]
+@[params]
pub struct AdamOptimizerConfig {
learning_rate f64 = 0.001
beta1 f64 = 0.9
2 changes: 1 addition & 1 deletion nn/optimizers/sgd.v
@@ -11,7 +11,7 @@ pub mut:
params []&autograd.Variable[T]
}

-[params]
+@[params]
pub struct SgdOptimizerConfig {
learning_rate f64 = 0.001
}
2 changes: 1 addition & 1 deletion nn/types/optimizer.v
@@ -5,7 +5,7 @@ import vtl.autograd
// Optimizer is a generic interface for all optimizers.
pub interface Optimizer[T] {
mut:
-params []&autograd.Variable[T]
+params []&autograd.Variable[T]
learning_rate f64
update() !
build_params(layers Layer[T])
6 changes: 3 additions & 3 deletions src/assignment.v
@@ -1,21 +1,21 @@
module vtl

// set copies a scalar value into a Tensor at the provided index
-[inline]
+@[inline]
pub fn (mut t Tensor[T]) set[T](index []int, val T) {
offset := t.offset_index(index)
t.data.set[T](offset, val)
}

// set_nth copies a scalar value into a Tensor at the provided offset
-[inline]
+@[inline]
pub fn (mut t Tensor[T]) set_nth[T](n int, val T) {
index := t.nth_index(n)
t.set[T](index, val)
}

// fill fills an entire Tensor with a given value
-[inline]
+@[inline]
pub fn (mut t Tensor[T]) fill[T](val T) &Tensor[T] {
t.data.fill[T](val)
return t
6 changes: 3 additions & 3 deletions src/broadcast.v
@@ -120,7 +120,7 @@ fn broadcast_shapes(args ...[]int) []int {
}

// broadcast2 broadcasts two Tensors against each other
-[inline]
+@[inline]
pub fn broadcast2[T](a &Tensor[T], b &Tensor[T]) !(&Tensor[T], &Tensor[T]) {
shape := a.broadcastable(b)!
r1 := a.broadcast_to(shape)!
@@ -129,7 +129,7 @@ pub fn broadcast2[T](a &Tensor[T], b &Tensor[T]) !(&Tensor[T], &Tensor[T]) {
}

// broadcast3 broadcasts three Tensors against each other
-[inline]
+@[inline]
pub fn broadcast3[T](a &Tensor[T], b &Tensor[T], c &Tensor[T]) !(&Tensor[T], &Tensor[T], &Tensor[T]) {
shape := broadcast_shapes(a.shape, b.shape, c.shape)
r1 := a.broadcast_to(shape)!
@@ -139,7 +139,7 @@ pub fn broadcast3[T](a &Tensor[T], b &Tensor[T], c &Tensor[T]) !(&Tensor[T], &Te
}

// broadcast_n broadcasts N Tensors against each other
-[inline]
+@[inline]
pub fn broadcast_n[T](ts []&Tensor[T]) ![]&Tensor[T] {
shapes := ts.map(it.shape)
shape := broadcast_shapes(...shapes)
2 changes: 1 addition & 1 deletion src/build.v
@@ -2,7 +2,7 @@ module vtl

import vtl.storage

-[params]
+@[params]
pub struct TensorData {
pub:
memory MemoryFormat = .row_major
16 changes: 8 additions & 8 deletions src/cast.v
@@ -3,7 +3,7 @@ module vtl
// as_bool casts the Tensor to a Tensor of bools.
// If the original Tensor is not a Tensor of bools, then each value is cast to a bool,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_bool[T]() &Tensor[bool] {
$if T is bool {
return t
@@ -23,7 +23,7 @@ pub fn (t &Tensor[T]) as_bool[T]() &Tensor[bool] {
// as_f32 casts the Tensor to a Tensor of f32s.
// If the original Tensor is not a Tensor of f32s, then each value is cast to a f32,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_f32[T]() &Tensor[f32] {
$if T is f32 {
return t
@@ -43,7 +43,7 @@ pub fn (t &Tensor[T]) as_f32[T]() &Tensor[f32] {
// as_f64 casts the Tensor to a Tensor of f64s.
// If the original Tensor is not a Tensor of f64s, then each value is cast to a f64,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_f64[T]() &Tensor[f64] {
$if T is f64 {
return t
@@ -63,7 +63,7 @@ pub fn (t &Tensor[T]) as_f64[T]() &Tensor[f64] {
// as_i16 casts the Tensor to a Tensor of i16 values.
// If the original Tensor is not a Tensor of i16s, then each value is cast to a i16,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_i16[T]() &Tensor[i16] {
$if T is i16 {
return t
@@ -83,7 +83,7 @@ pub fn (t &Tensor[T]) as_i16[T]() &Tensor[i16] {
// as_i8 casts the Tensor to a Tensor of i8 values.
// If the original Tensor is not a Tensor of i8s, then each value is cast to a i8,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_i8[T]() &Tensor[i8] {
$if T is i8 {
return t
@@ -103,7 +103,7 @@ pub fn (t &Tensor[T]) as_i8[T]() &Tensor[i8] {
// as_int casts the Tensor to a Tensor of ints.
// If the original Tensor is not a Tensor of ints, then each value is cast to a int,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_int[T]() &Tensor[int] {
$if T is int {
return t
@@ -123,7 +123,7 @@ pub fn (t &Tensor[T]) as_int[T]() &Tensor[int] {
// as_string casts the Tensor to a Tensor of string values.
// If the original Tensor is not a Tensor of strings, then each value is cast to a string,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_string[T]() &Tensor[string] {
$if T is string {
return t
@@ -143,7 +143,7 @@ pub fn (t &Tensor[T]) as_string[T]() &Tensor[string] {
// as_u8 casts the Tensor to a Tensor of u8 values.
// If the original Tensor is not a Tensor of u8s, then each value is cast to a u8,
// otherwise the original Tensor is returned.
-[inline]
+@[inline]
pub fn (t &Tensor[T]) as_u8[T]() &Tensor[u8] {
$if T is u8 {
return t
… (diffs for the remaining changed files were not loaded)
