Commit 4807874
fixing imports
MartinuzziFrancesco committed Jan 8, 2025
1 parent 136addf commit 4807874
Showing 8 changed files with 25 additions and 31 deletions.
9 changes: 3 additions & 6 deletions Project.toml
@@ -6,13 +6,10 @@ version = "0.10.5"
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
 CellularAutomata = "878138dc-5b27-11ea-1a71-cb95d38d6b29"
-Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
-PartialFunctions = "570af359-4316-4cb7-8c74-252c00c2016b"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
-StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 WeightInitializers = "d49dbf32-c5c2-4618-8acc-27bb2598ef2d"

@@ -29,12 +26,10 @@ Adapt = "4.1.1"
 Aqua = "0.8"
 CellularAutomata = "0.0.2"
 DifferentialEquations = "7.15.0"
-Distances = "0.10"
 LIBSVM = "0.8"
 LinearAlgebra = "1.10"
 MLJLinearModels = "0.9.2, 0.10"
 NNlib = "0.9.26"
-PartialFunctions = "1.2"
 Random = "1.10"
 Reexport = "1.2.2"
 SafeTestsets = "0.1"

@@ -51,7 +46,9 @@ LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
 MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [targets]
-test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations", "MLJLinearModels", "LIBSVM"]
+test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations",
+    "MLJLinearModels", "LIBSVM", "Statistics"]
13 changes: 5 additions & 8 deletions src/ReservoirComputing.jl
@@ -1,14 +1,11 @@
 module ReservoirComputing

-using Adapt
-using CellularAutomata
-using Distances
-using LinearAlgebra
-using NNlib
-using PartialFunctions
-using Random
+using Adapt: adapt
+using CellularAutomata: CellularAutomaton
+using LinearAlgebra: eigvals, mul!, I
+using NNlib: fast_act, sigmoid
+using Random: Random, AbstractRNG
 using Reexport: Reexport, @reexport
 using Statistics
-using StatsBase: sample
 using WeightInitializers: DeviceAgnostic, PartialFunction, Utils
 @reexport using WeightInitializers
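Note: this hunk is the core of the commit. Blanket using statements become explicit `using Pkg: name` imports, so only the listed names are bound inside the module. A minimal standalone sketch of the difference (illustrative only, not code from the package):

    using LinearAlgebra: eigvals  # binds eigvals only; norm, dot, etc. stay unbound

    # Typical reservoir-computing use of eigvals: spectral radius of a reservoir matrix.
    spectral_radius(W) = maximum(abs, eigvals(W))

    spectral_radius(rand(10, 10) .- 0.5)

Explicit imports make name clashes and stale dependencies (such as the Distances, PartialFunctions, and StatsBase entries dropped from Project.toml above) easier to catch; tools such as ExplicitImports.jl can automate that audit.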
2 changes: 1 addition & 1 deletion src/esn/deepesn.jl
@@ -77,7 +77,7 @@ function DeepESN(train_data,
         matrix_type=typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     end
2 changes: 1 addition & 1 deletion src/esn/esn.jl
@@ -66,7 +66,7 @@ function ESN(train_data,
         matrix_type=typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     end
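Note: both the ESN and DeepESN constructors change only in dropping the `Adapt.` qualifier, now that `adapt` is imported by name. For context, a small sketch of what the padding branch computes, assuming plain CPU arrays stand in for `matrix_type`:

    using Adapt: adapt

    train_data = rand(3, 5)              # 3 features, 5 time steps
    matrix_type = typeof(train_data)

    # Prepend a constant row of ones (the bias row for padded states).
    # adapt keeps the new row on the same array type as train_data,
    # so the same code also works for e.g. GPU arrays.
    padded = vcat(adapt(matrix_type, ones(1, size(train_data, 2))), train_data)
    @assert size(padded) == (4, 5)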
8 changes: 4 additions & 4 deletions src/esn/esn_predict.jl
@@ -93,16 +93,16 @@ end

 function allocate_outpad(hesn::HybridESN, states_type, out)
     pad_length = length(out) + size(hesn.model.model_data[:, 1], 1)
-    out_tmp = Adapt.adapt(typeof(out), zeros(pad_length))
+    out_tmp = adapt(typeof(out), zeros(pad_length))
     return allocate_singlepadding(states_type, out_tmp)
 end

 function allocate_singlepadding(::AbstractPaddedStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1) + 1))
+    adapt(typeof(out), zeros(size(out, 1) + 1))
 end
 function allocate_singlepadding(::StandardStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1)))
+    adapt(typeof(out), zeros(size(out, 1)))
 end
 function allocate_singlepadding(::ExtendedStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1)))
+    adapt(typeof(out), zeros(size(out, 1)))
 end
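Note: the three `allocate_singlepadding` methods pick the padding length by dispatching on the states type; only padded states reserve the extra bias slot. A reduced sketch of that pattern with stub types (not the package's own):

    abstract type States end
    struct Padded   <: States end
    struct Standard <: States end

    padding_size(::Padded, out)   = size(out, 1) + 1  # extra bias slot
    padding_size(::Standard, out) = size(out, 1)

    padding_size(Padded(), zeros(4))    # 5
    padding_size(Standard(), zeros(4))  # 4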
18 changes: 9 additions & 9 deletions src/esn/esn_reservoir_drivers.jl
@@ -30,9 +30,9 @@ function create_states(reservoir_driver::AbstractReservoirDriver,
     train_len = size(train_data, 2) - washout
     res_size = size(reservoir_matrix, 1)

-    states = Adapt.adapt(typeof(train_data), zeros(res_size, train_len))
+    states = adapt(typeof(train_data), zeros(res_size, train_len))
     tmp_array = allocate_tmp(reservoir_driver, typeof(train_data), res_size)
-    _state = Adapt.adapt(typeof(train_data), zeros(res_size, 1))
+    _state = adapt(typeof(train_data), zeros(res_size, 1))

     for i in 1:washout
         yv = @view train_data[:, i]

@@ -59,9 +59,9 @@ function create_states(reservoir_driver::AbstractReservoirDriver,
     train_len = size(train_data, 2) - washout
     res_size = sum([size(reservoir_matrix[i], 1) for i in 1:length(reservoir_matrix)])

-    states = Adapt.adapt(typeof(train_data), zeros(res_size, train_len))
+    states = adapt(typeof(train_data), zeros(res_size, train_len))
     tmp_array = allocate_tmp(reservoir_driver, typeof(train_data), res_size)
-    _state = Adapt.adapt(typeof(train_data), zeros(res_size))
+    _state = adapt(typeof(train_data), zeros(res_size))

     for i in 1:washout
         for j in 1:length(reservoir_matrix)

@@ -108,7 +108,7 @@ echo state networks (`ESN`).
 - `leaky_coefficient`: The leaky coefficient used in the RNN.
   Defaults to 1.0.
 """
-function RNN(; activation_function=NNlib.fast_act(tanh), leaky_coefficient=1.0)
+function RNN(; activation_function=fast_act(tanh), leaky_coefficient=1.0)
     RNN(activation_function, leaky_coefficient)
 end

@@ -142,7 +142,7 @@ function next_state!(out, rnn::RNN, x, y, W::Vector, W_in, b, tmp_array)
 end

 function allocate_tmp(::RNN, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
 end

 #multiple RNN driver

@@ -210,7 +210,7 @@ function next_state!(out, mrnn::MRNN, x, y, W, W_in, b, tmp_array)
 end

 function allocate_tmp(::MRNN, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
 end

 abstract type AbstractGRUVariant end

@@ -280,7 +280,7 @@ This driver is based on the GRU architecture [^Cho2014].
     "_Learning phrase representations using RNN encoder-decoder for statistical machine translation._"
     arXiv preprint arXiv:1406.1078 (2014).
 """
-function GRU(; activation_function=[NNlib.sigmoid, NNlib.sigmoid, tanh],
+function GRU(; activation_function=[sigmoid, sigmoid, tanh],
         inner_layer=fill(scaled_rand, 2),
         reservoir=fill(rand_sparse, 2),
         bias=fill(scaled_rand, 2),

@@ -344,7 +344,7 @@ function next_state!(out, gru::GRUParams, x, y, W, W_in, b, tmp_array)
 end

 function allocate_tmp(::GRUParams, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:9]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:9]
 end

 #W=U, W_in=W in papers. x=h, and y=x. I know, it's confusing. ( on the left our notation)
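Note: with `fast_act` and `sigmoid` imported explicitly, the `NNlib.` prefixes become redundant; the default activations themselves are unchanged. As I read NNlib, `fast_act` swaps common activations for cheaper approximations; a hedged sketch:

    using NNlib: fast_act, tanh_fast

    act = fast_act(tanh)     # expected to return tanh_fast
    x = randn(Float32, 4)
    act.(x) ≈ tanh.(x)       # true up to a small approximation error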
2 changes: 1 addition & 1 deletion src/esn/hybridesn.jl
@@ -109,7 +109,7 @@ function HybridESN(model,

     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     else
         in_size = size(train_data, 1)
2 changes: 1 addition & 1 deletion src/predict.jl
@@ -116,7 +116,7 @@ end

 #single matrix for other training methods
 function output_storing(training_method, out_size, prediction_len, storing_type)
-    return Adapt.adapt(storing_type, zeros(out_size, prediction_len))
+    return adapt(storing_type, zeros(out_size, prediction_len))
 end

 #general storing -> single matrix
