Commit

fix: use elementwise operations
agdestein committed Dec 9, 2024
1 parent aa9448d commit 03acafa
Showing 4 changed files with 216 additions and 22 deletions.
126 changes: 126 additions & 0 deletions lib/SymmetryClosure/generate_data.jl
@@ -0,0 +1,126 @@
# LSP hack #src
if false #src
include("src/SymmetryClosure.jl") #src
include("../NeuralClosure/src/NeuralClosure.jl") #src
include("../../src/IncompressibleNavierStokes.jl") #src
using .SymmetryClosure #src
using .NeuralClosure #src
using .IncompressibleNavierStokes #src
end #src

########################################################################## #src

# # Data generation
#
# This script is used to generate filtered DNS data.

@info "# DNS data generation"

########################################################################## #src

@info "Loading packages"
flush(stdout)

using Adapt
using CUDA
using Dates
using JLD2
using NeuralClosure
using Random
using SymmetryClosure

########################################################################## #src

# SLURM specific variables
jobid = haskey(ENV, "SLURM_JOB_ID") ? parse(Int, ENV["SLURM_JOB_ID"]) : nothing
taskid =
haskey(ENV, "SLURM_ARRAY_TASK_ID") ? parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) : nothing

isnothing(jobid) || @info "Running on SLURM (jobid = $jobid)"
isnothing(taskid) || @info "Task id = $taskid"

########################################################################## #src

@info "Starting at $(Dates.now())"
@info """
Last commit:
$(cd(() -> read(`git log -n 1`, String), @__DIR__))
"""

########################################################################## #src

# ## Hardware selection

if CUDA.functional()
## For running on a CUDA compatible GPU
@info "Running on CUDA"
backend = CUDABackend()
CUDA.allowscalar(false)
device = x -> adapt(CuArray, x)
clean() = (GC.gc(); CUDA.reclaim())
else
## For running on CPU.
## Consider reducing the sizes of DNS, LES, and CNN layers if
## you want to test run on a laptop.
@warn "Running on CPU"
backend = CPU()
device = identity
clean() = nothing
end
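
# Smoke test (illustrative only): `device` should move a small array to the
# selected backend, and Adapt moves nested structures (setups, parameter trees)
# the same way.
devicetest = device(rand(Float32, 4, 4))
@info "Device array type" typeof(devicetest)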

########################################################################## #src

# ## Data generation
#
# Create filtered DNS data for training, validation, and testing.

# Parameters
case = SymmetryClosure.testcase()
(; outdir, params) = case

# DNS seeds
ntrajectory = 8
seeds = splitseed(case.seed_dns, ntrajectory)
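
# Sketch (illustrative only, not the package implementation): `splitseed` is a
# NeuralClosure helper, assumed here to derive `ntrajectory` reproducible child
# seeds from the master DNS seed, so that each SLURM array task owns exactly one
# trajectory. A standard-library stand-in with the same intent:
false && let # gated off, for illustration only
    example_seeds = rand(Xoshiro(case.seed_dns), UInt64, ntrajectory)
    @assert length(example_seeds) == ntrajectory
end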

# Create data
for (iseed, seed) in enumerate(seeds)
if isnothing(taskid) || iseed == taskid
@info "Creating DNS trajectory for seed $(repr(seed))"
else
# Each task does one initial condition
@info "Skipping seed $(repr(seed)) for task $taskid"
continue
end
filenames = map(Iterators.product(params.nles, params.filters)) do (nles, Φ)
f = getdatafile(outdir, nles, Φ, seed)
datadir = dirname(f)
ispath(datadir) || mkpath(datadir)
f
end
data = create_les_data(; case.params..., rng = Xoshiro(seed), filenames)
@info(
"Trajectory info:",
data[1].comptime / 60,
length(data[1].t),
Base.summarysize(data) * 1e-9,
)
end

# Computational time
docomp = false
docomp && let
comptime, datasize = 0.0, 0.0
for seed in seeds
comptime += load(
getdatafile(outdir, params.nles[1], params.filters[1], seed),
"comptime",
)
end
for seed in seeds, nles in params.nles, Φ in params.filters
data = namedtupleload(getdatafile(outdir, nles, Φ, seed))
datasize += Base.summarysize(data)
end
@info "Data" comptime
@info "Data" comptime / 60 datasize * 1e-9
clean()
end
25 changes: 25 additions & 0 deletions lib/SymmetryClosure/src/cases.jl
@@ -0,0 +1,25 @@
function testcase()
# Choose where to put output
basedir = haskey(ENV, "DEEPDIP") ? ENV["DEEPDIP"] : joinpath(@__DIR__, "..")
outdir = mkpath(joinpath(basedir, "output", "kolmogorov"))

seed_dns = 123
T = Float32

params = (;
D = 2,
lims = (T(0), T(1)),
Re = T(6e3),
tburn = T(0.5),
tsim = T(5),
savefreq = 50,
ndns = 4096,
nles = [32, 64, 128],
filters = [FaceAverage()],
icfunc = (setup, psolver, rng) ->
random_field(setup, T(0); kp = 20, psolver, rng),
method = RKMethods.LMWray3(; T),
bodyforce = (dim, x, y, t) -> (dim == 1) * 5 * sinpi(8 * y),
issteadybodyforce = true,
)
(; outdir, seed_dns, params)
end
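
# Usage sketch (illustrative; relies on the named-tuple return above):
false && let
    case = testcase()
    case.params.Re  # Reynolds number (Float32 6000)
    case.seed_dns   # master DNS seed consumed by the data-generation script
    case.outdir     # output directory for this Kolmogorov case
end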
37 changes: 21 additions & 16 deletions lib/SymmetryClosure/src/tensorclosure.jl
@@ -1,38 +1,43 @@
function tensorclosure(model, u, θ, setup)
(; Δ) = setup.grid
@assert all(Δi -> all(≈(Δi[1]), Δi), Array.(Δ))
Δx = Array(Δ[1])[1]
B, V = tensorbasis(u, setup)
x = model(V, θ)
x .*= Δx^2
# τ = sum(x .* B; dims = ndims(B))
τ = IncompressibleNavierStokes.lastdimcontract(x, B, setup)
τ = apply_bc_p(τ, zero(eltype(u)), setup)
IncompressibleNavierStokes.divoftensor(τ, setup)
end
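
# Illustrative sketch, not the actual IncompressibleNavierStokes implementation:
# `lastdimcontract(x, B, setup)` is assumed to realize the contraction written in
# the comment above, i.e. summing x .* B over the last (basis) dimension in a
# backend-friendly way. A naive reference version for plain dense arrays:
naive_lastdimcontract(x, B) = dropdims(sum(x .* B; dims = ndims(B)); dims = ndims(B))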

function polynomial(V, θ)
s..., nV = size(V)
V = eachslice(V; dims = ndims(V))
basis = if nV == 2
- cat(V[1], V[2], V[1] * V[2], V[1]^2, V[2]^2; dims = length(s) + 1)
+ cat(V[1], V[2], V[1] .* V[2], V[1] .^ 2, V[2] .^ 2; dims = length(s) + 1)
elseif nV == 5
cat(
V[1],
V[2],
V[3],
V[4],
V[5],
- V[1] * V[2],
- V[1] * V[3],
- V[1] * V[4],
- V[1] * V[5],
- V[2] * V[3],
- V[2] * V[4],
- V[2] * V[5],
- V[3] * V[4],
- V[3] * V[5],
- V[4] * V[5],
- V[1]^2,
- V[2]^2,
- V[3]^2,
- V[4]^2,
- V[5]^2;
+ V[1] .* V[2],
+ V[1] .* V[3],
+ V[1] .* V[4],
+ V[1] .* V[5],
+ V[2] .* V[3],
+ V[2] .* V[4],
+ V[2] .* V[5],
+ V[3] .* V[4],
+ V[3] .* V[5],
+ V[4] .* V[5],
+ V[1] .^ 2,
+ V[2] .^ 2,
+ V[3] .^ 2,
+ V[4] .^ 2,
+ V[5] .^ 2;
dims = length(s) + 1,
)
end
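An aside on the fix itself (illustrative sketch, not part of the changed files): the slices `V[i]` are arrays, so the scalar operators `*` and `^` mean matrix products (or fail for higher-dimensional arrays), while the broadcasted `.*` and `.^` act pointwise, which is what the tensor basis needs:

A = rand(4, 4); B = rand(4, 4)
A * B    # matrix product, not what is wanted here
A .* B   # elementwise (Hadamard) product, the intended operation
A .^ 2   # elementwise square; A^2 would be the matrix power A * A
C = rand(4, 4, 2)
C .* C   # broadcasting works for any shape; C * C throws a MethodError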
50 changes: 44 additions & 6 deletions lib/SymmetryClosure/test_tensor.jl
@@ -3,28 +3,31 @@ if false
using .SymmetryClosure
end

# using CairoMakie
using CairoMakie
using IncompressibleNavierStokes
using SymmetryClosure
using CUDA
using Zygote
using Random
using LinearAlgebra
using NeuralClosure

lines(cumsum(randn(100)))

# Setup
n = 8
# ax = range(0.0, 1.0, n + 1)
# x = ax, ax
x = tanh_grid(0.0, 1.0, n + 1), stretched_grid(-0.2, 1.2, n + 1)
n = 128
ax = range(0.0, 1.0, n + 1)
x = ax, ax
# x = tanh_grid(0.0, 1.0, n + 1), stretched_grid(-0.2, 1.2, n + 1)
setup = Setup(;
x,
Re = 1e4,
backend = CUDABackend(),
boundary_conditions = ((DirichletBC(), DirichletBC()), (DirichletBC(), DirichletBC())),
# boundary_conditions = ((DirichletBC(), DirichletBC()), (DirichletBC(), DirichletBC())),
);
ustart = vectorfield(setup) |> randn!
ustart = random_field(setup)
# ustart = vectorfield(setup) |> randn!

u = ustart

@@ -65,7 +68,42 @@ let
nothing
end

urot = NeuralClosure.rot2stag(u, 1)
B, V = tensorbasis(u, setup)
Brot, Vrot = tensorbasis(urot, setup)

iV = 2
# V[:, :, iV] |> Array |> heatmap
Vrot[:, :, iV] |> Array |> heatmap
rot2(V, 1)[:, :, iV] |> Array |> heatmap

Vrot - rot2(V, 1) .|> abs |> maximum

begin
θ = randn(5, 3)
# θ[:, 1] .= 0
# θ[:, 3] .= 0
# θ = zeros(5, 3)
# θ[3, :] .= 1e-3 * randn()
θ = θ |> CuArray
s = tensorclosure(polynomial, u, θ, setup) |> u -> apply_bc_u(u, zero(eltype(u)), setup)
srot =
tensorclosure(polynomial, urot, θ, setup) |>
u -> apply_bc_u(u, zero(eltype(u)), setup)
rots = NeuralClosure.rot2stag(s, 1)
(srot-rots)[2:end-1, 2:end-1, :] .|> abs |> maximum
end
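
# The block above is an equivariance check: applying the closure to the rotated
# field (srot) should match rotating the closure output (rots), away from the
# boundaries. A minimal self-contained analogue of the same property, using
# Base's rotl90 and a pointwise map (illustrative only):
let
    f(a) = a .^ 2 .- a                        # pointwise, hence equivariant under rotation
    a = randn(6, 6)
    maximum(abs, f(rotl90(a)) - rotl90(f(a))) # exactly zero
end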

i = 1
srot[2:end-1, 2:end-1, i] |> Array |> heatmap
rots[2:end-1, 2:end-1, i] |> Array |> heatmap
# s[2:end-1, 2:end-1, 2] |> Array |> heatmap
(srot-rots)[2:end-1, 2:end-1, i] |> Array |> heatmap
(srot-rots)[2:end-1, 2:end-1, :] .|> abs |> maximum
he = (srot-rots)[2:end-1, 2:end-1, :]
he[125:128, 1:10, 1]

x = randn(5, 5, 2)

typeof(B)
getindex.(B, 1)
