From df916ca012dd4af0b23302a83f816ad527487783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:00:25 +0100 Subject: [PATCH 1/6] refactor: remove PaperDC from repo --- lib/PaperDC/LICENSE | 21 - lib/PaperDC/Project.toml | 61 -- lib/PaperDC/README.md | 19 - lib/PaperDC/job_a100.sh | 29 - lib/PaperDC/job_h100.sh | 30 - lib/PaperDC/postanalysis.jl | 1312 ---------------------------- lib/PaperDC/postanalysis3D.jl | 1359 ------------------------------ lib/PaperDC/prioranalysis.jl | 467 ---------- lib/PaperDC/src/PaperDC.jl | 88 -- lib/PaperDC/src/observe.jl | 88 -- lib/PaperDC/src/rk.jl | 166 ---- lib/PaperDC/src/train.jl | 351 -------- lib/PaperDC/transferfunctions.jl | 217 ----- 13 files changed, 4208 deletions(-) delete mode 100644 lib/PaperDC/LICENSE delete mode 100644 lib/PaperDC/Project.toml delete mode 100644 lib/PaperDC/README.md delete mode 100644 lib/PaperDC/job_a100.sh delete mode 100644 lib/PaperDC/job_h100.sh delete mode 100644 lib/PaperDC/postanalysis.jl delete mode 100644 lib/PaperDC/postanalysis3D.jl delete mode 100644 lib/PaperDC/prioranalysis.jl delete mode 100644 lib/PaperDC/src/PaperDC.jl delete mode 100644 lib/PaperDC/src/observe.jl delete mode 100644 lib/PaperDC/src/rk.jl delete mode 100644 lib/PaperDC/src/train.jl delete mode 100644 lib/PaperDC/transferfunctions.jl diff --git a/lib/PaperDC/LICENSE b/lib/PaperDC/LICENSE deleted file mode 100644 index d76a52488..000000000 --- a/lib/PaperDC/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 Syver Døving Agdestein, Benjamin Sanderse, and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/lib/PaperDC/Project.toml b/lib/PaperDC/Project.toml deleted file mode 100644 index ce93ca4a5..000000000 --- a/lib/PaperDC/Project.toml +++ /dev/null @@ -1,61 +0,0 @@ -name = "PaperDC" -uuid = "0d06c178-ba13-4c21-b726-6f4489412f0d" -authors = ["Syver Døving Agdestein "] -version = "1.0.0" - -[deps] -Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" -Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" -DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" -FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" -IncompressibleNavierStokes = "5e318141-6589-402b-868d-77d7df8c442e" -JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -LoggingExtras = "e6f89c97-d47a-5376-807f-9c37f3926c36" -Lux = "b2108857-7c20-44ae-9111-449ecde12c47" -LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda" -MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" -NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -NeuralClosure = "099dac27-d7f2-4047-93d5-0baee36b9c25" -Observables = "510215fc-4207-5dde-b226-833fc4488ee2" -Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" -ParameterSchedulers = "d7d3b36b-41b8-4d0d-a2bf-768c6151755e" -Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - -[sources.IncompressibleNavierStokes] -path = "../.." - -[sources.NeuralClosure] -path = "../NeuralClosure" - -[compat] -Accessors = "0.1" -Adapt = "4" -CUDA = "5" -CairoMakie = "0.12" -Dates = "1" -DocStringExtensions = "0.9" -EnumX = "1" -FFTW = "1" -IncompressibleNavierStokes = "2" -JLD2 = "0.5" -LaTeXStrings = "1" -LinearAlgebra = "1" -LoggingExtras = "1" -Lux = "1" -LuxCUDA = "0.3" -MLUtils = "0.4" -NNlib = "0.9" -NeuralClosure = "1" -Observables = "0.5" -Optimisers = "0.3, 0.4" -ParameterSchedulers = "0.4" -SparseArrays = "1" -julia = "1.9" diff --git a/lib/PaperDC/README.md b/lib/PaperDC/README.md deleted file mode 100644 index fc66dea5a..000000000 --- a/lib/PaperDC/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# PaperDC - -Scripts for generating results of the paper -[Discretize first, filter next: Learning divergence-consistent closure models for large-eddy simulation](https://www.sciencedirect.com/science/article/pii/S0021999124008258). - -## Set up environment - -Run: - -```sh -julia --project=lib/PaperDC -e 'using Pkg; Pkg.instantiate()' -``` - -Now you can run the scripts in this directory. They generate results for - -- `prioranalysis.jl`: Section 5.1 "Filtered DNS (2D and 3D)" -- `postanalysis.jl`: Section 5.2 "LES (2D)" -- `postanalysis3D.jl`: Appendix G.2. "LES of forced turbulence (3D)" -- `transferfunctions.jl`: Appendix E. 
"Continuous filters and transfer functions" diff --git a/lib/PaperDC/job_a100.sh b/lib/PaperDC/job_a100.sh deleted file mode 100644 index 863585cee..000000000 --- a/lib/PaperDC/job_a100.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --cpus-per-task=18 -#SBATCH --gpus=1 -#SBATCH --partition=gpu_a100 -#SBATCH --time=05:00:00 -#SBATCH --mail-type=BEGIN,END -#SBATCH --mail-user=sda@cwi.nl -# #SBATCH --array=1-1 - -# Note: -# - gpu_a100: 18 cores -# - gpu_h100: 16 cores -# https://servicedesk.surf.nl/wiki/display/WIKI/Snellius+partitions+and+accounting - -mkdir -p /scratch-shared/$USER - -echo "Slurm job ID: $SLURM_JOB_ID" -echo "Slurm array task ID: $SLURM_ARRAY_TASK_ID" - -export JULIA_DEPOT_PATH=/scratch-shared/$USER/.julia_a100: - -cd $HOME/projects/IncompressibleNavierStokes/lib/PaperDC - -# julia --project prioranalysis.jl -julia --project -t auto postanalysis.jl - -# julia --project -t auto -e 'using Pkg; Pkg.update()' diff --git a/lib/PaperDC/job_h100.sh b/lib/PaperDC/job_h100.sh deleted file mode 100644 index c38fd7b0e..000000000 --- a/lib/PaperDC/job_h100.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --cpus-per-task=16 -#SBATCH --gpus=1 -#SBATCH --partition=gpu_h100 -#SBATCH --time=02:00:00 -#SBATCH --mail-type=BEGIN,END -#SBATCH --mail-user=sda@cwi.nl -# #SBATCH --array=1-1 - -# Note: -# - gpu_a100: 18 cores -# - gpu_h100: 16 cores -# https://servicedesk.surf.nl/wiki/display/WIKI/Snellius+partitions+and+accounting - -mkdir -p /scratch-shared/$USER - -echo "Slurm job ID: $SLURM_JOB_ID" -echo "Slurm array task ID: $SLURM_ARRAY_TASK_ID" - -export JULIA_DEPOT_PATH=/scratch-shared/$USER/.julia_h100: - -cd $HOME/projects/IncompressibleNavierStokes/lib/PaperDC - -# julia --project prioranalysis.jl -# julia --project postanalysis.jl -julia --project postanalysis3D.jl - -# julia --project -t auto -e 'using Pkg; Pkg.update()' diff --git a/lib/PaperDC/postanalysis.jl b/lib/PaperDC/postanalysis.jl deleted file mode 100644 index d9c2c507b..000000000 --- a/lib/PaperDC/postanalysis.jl +++ /dev/null @@ -1,1312 +0,0 @@ -# # A-posteriori analysis: Large Eddy Simulation (2D) -# -# This script is used to generate results for the the paper [Agdestein2025](@citet). -# -# - Generate filtered DNS data -# - Train closure models -# - Compare filters, closure models, and projection orders -# -# The filtered DNS data is saved and can be loaded in a subesequent session. -# The learned CNN parameters are also saved. - -if false #src - include("src/PaperDC.jl") #src -end #src - -@info "Script started" - -# Color palette for consistent theme throughout paper -palette = (; color = ["#3366cc", "#cc0000", "#669900", "#ff9900"]) - -# Choose where to put output -basedir = haskey(ENV, "DEEPDIP") ? ENV["DEEPDIP"] : @__DIR__ -outdir = joinpath(basedir, "output", "kolmogorov") -plotdir = joinpath(outdir, "plots") -logdir = joinpath(outdir, "logs") -ispath(outdir) || mkpath(outdir) -ispath(plotdir) || mkpath(plotdir) -ispath(logdir) || mkpath(logdir) - -# Turn off plots for array jobs. 
-# If all the workers do this at the same time, one might -# error when saving the file at the same time -doplot() = true - -########################################################################## #src - -# ## Configure logger - -using PaperDC -using Dates - -# Write output to file, as the default SLURM file is not updated often enough -isslurm = haskey(ENV, "SLURM_JOB_ID") -if isslurm - jobid = parse(Int, ENV["SLURM_JOB_ID"]) - taskid = parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) - logfile = "job=$(jobid)_task=$(taskid)_$(Dates.now()).out" -else - taskid = nothing - logfile = "log_$(Dates.now()).out" -end -logfile = joinpath(logdir, logfile) -setsnelliuslogger(logfile) - -@info "# A-posteriori analysis: Forced turbulence (2D)" - -# ## Load packages - -@info "Loading packages" - -using Accessors -using Adapt -# using GLMakie -using CairoMakie -using CUDA -using IncompressibleNavierStokes -using IncompressibleNavierStokes.RKMethods -using JLD2 -using LaTeXStrings -using LinearAlgebra -using Lux -using LuxCUDA -using NeuralClosure -using NNlib -using Optimisers -using ParameterSchedulers -using Random -using SparseArrays - -########################################################################## #src - -# ## Random number seeds -# -# Use a new RNG with deterministic seed for each code "section" -# so that e.g. training batch selection does not depend on whether we -# generated fresh filtered DNS data or loaded existing one (the -# generation of which would change the state of a global RNG). -# -# Note: Using `rng = Random.default_rng()` twice seems to point to the -# same RNG, and mutating one also mutates the other. -# `rng = Xoshiro()` creates an independent copy each time. -# -# We define all the seeds here. - -seeds = (; - dns = 123, # Initial conditions - θ_start = 234, # Initial CNN parameters - prior = 345, # A-priori training batch selection - post = 456, # A-posteriori training batch selection -) - -########################################################################## #src - -# ## Hardware selection - -# Precision -T = Float32 - -# Device -if CUDA.functional() - ## For running on a CUDA compatible GPU - @info "Running on CUDA" - backend = CUDABackend() - CUDA.allowscalar(false) - device = x -> adapt(CuArray, x) - clean() = (GC.gc(); CUDA.reclaim()) -else - ## For running on CPU. - ## Consider reducing the sizes of DNS, LES, and CNN layers if - ## you want to test run on a laptop. - @warn "Running on CPU" - backend = CPU() - device = identity - clean() = nothing -end - -########################################################################## #src - -# ## Data generation -# -# Create filtered DNS data for training, validation, and testing. 
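The script header notes that the filtered DNS data is saved to disk and reloaded in later sessions. As a minimal sketch of that round trip, using the same PaperDC helpers that appear further down in this script (`getdatafile`, `namedtupleload`) and the `dns_seeds` defined just below; it assumes `createdata` has already been run for the chosen resolution and filter:

```julia
# Minimal sketch: reload one filtered-DNS trajectory from disk.
sample = namedtupleload(getdatafile(outdir, 64, FaceAverage(), dns_seeds[1]))
sample.u  # filtered velocity snapshots (time is the last dimension)
sample.t  # corresponding simulation times
```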
- -# Parameters -params = (; - D = 2, - lims = (T(0), T(1)), - Re = T(6e3), - tburn = T(0.5), - tsim = T(5), - savefreq = 50, - ndns = 4096, - nles = [32, 64, 128, 256], - filters = (FaceAverage(), VolumeAverage()), - backend, - icfunc = (setup, psolver, rng) -> random_field(setup, T(0); kp = 20, psolver, rng), - method = RKMethods.Wray3(; T), - bodyforce = (dim, x, y, t) -> (dim == 1) * 5 * sinpi(8 * y), - issteadybodyforce = true, - processors = (; log = timelogger(; nupdate = 100)), -) - -# DNS seeds -ntrajectory = 8 -dns_seeds = splitseed(seeds.dns, ntrajectory) -dns_seeds_train = dns_seeds[1:ntrajectory-2] -dns_seeds_valid = dns_seeds[ntrajectory-1:ntrajectory-1] -dns_seeds_test = dns_seeds[ntrajectory:ntrajectory] - -# Create data -docreatedata = false -docreatedata && createdata(; params, seeds = dns_seeds, outdir, taskid) - -# Computational time -docomp = false -docomp && let - comptime, datasize = 0.0, 0.0 - for seed in dns_seeds - comptime += load( - getdatafile(outdir, params.nles[1], params.filters[1], seed), - "comptime", - ) - end - for seed in dns_seeds, nles in params.nles, Φ in params.filters - data = namedtupleload(getdatafile(outdir, nles, Φ, seed)) - datasize += Base.summarysize(data) - end - @info "Data" comptime - @info "Data" comptime / 60 datasize * 1e-9 - clean() -end - -# LES setups -setups = map(nles -> getsetup(; params, nles), params.nles); - -########################################################################## #src - -# ## CNN closure model - -# All training sessions will start from the same θ₀ -# for a fair comparison. - -closure, θ_start = cnn(; - setup = setups[1], - radii = [2, 2, 2, 2, 2], - channels = [24, 24, 24, 24, params.D], - activations = [tanh, tanh, tanh, tanh, identity], - use_bias = [true, true, true, true, false], - rng = Xoshiro(seeds.θ_start), -); -closure.chain - -@info "Initialized CNN with $(length(θ_start)) parameters" - -# Give the CNN a test run -# Note: Data and parameters are stored on the CPU, and -# must be moved to the GPU before use (with `device`) -let - @info "CNN warm up run" - using NeuralClosure.Zygote - u = randn(T, 32, 32, 2, 10) |> device - θ = θ_start |> device - closure(u, θ) - gradient(θ -> sum(closure(u, θ)), θ) - clean() -end - -########################################################################## #src - -# ## Training - -# ### A-priori training -# -# Train one set of CNN parameters for each of the filter types and grid sizes. -# Use the same batch selection random seed for each training setup. -# Save parameters to disk after each run. -# Plot training progress (for a validation data batch). 
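For context on what the "A-priori error" reported below measures: the script evaluates `create_relerr_prior(closure, u, c)` on pairs `(u, c)` taken from `create_io_arrays` (see the prediction-error section further down), where `u` is the filtered velocity and `c` the corresponding closure target. A hedged sketch of a relative error of that form (an assumed shape for illustration, not the NeuralClosure implementation):

```julia
# Sketch only: relative mismatch between CNN prediction and reference closure term.
relerr_prior(θ, u, c) = norm(closure(u, θ) - c) / norm(c)
```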
- -# Parameter save files - -# Train -let - dotrain = false - nepoch = 50 - # niter = 200 - niter = nothing - dotrain && trainprior(; - params, - priorseed = seeds.prior, - dns_seeds_train, - dns_seeds_valid, - taskid, - outdir, - plotdir, - closure, - θ_start, - # opt = AdamW(; eta = T(1.0e-3), lambda = T(5.0e-5)), - opt = Adam(T(1.0e-3)), - λ = T(5.0e-5), - # noiselevel = T(1e-3), - scheduler = CosAnneal(; l0 = T(1e-6), l1 = T(1e-3), period = nepoch), - nvalid = 64, - batchsize = 64, - displayref = true, - displayupdates = true, # Set to `true` if using CairoMakie - nupdate_callback = 20, - loadcheckpoint = false, - nepoch, - niter, - ) -end - -# Load learned parameters and training times -priortraining = loadprior(outdir, params.nles, params.filters) -θ_cnn_prior = map(p -> copyto!(copy(θ_start), p.θ), priortraining) -@info "" θ_cnn_prior .|> extrema # Check that parameters are within reasonable bounds - -# Training times -map(p -> p.comptime, priortraining) -map(p -> p.comptime, priortraining) |> vec .|> x -> round(x; digits = 1) -map(p -> p.comptime, priortraining) |> sum |> x -> x / 60 # Minutes - -# ## Plot training history - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (950, 250)) - for (ig, nles) in enumerate(params.nles) - ax = Axis( - fig[1, ig]; - title = "n = $(nles)", - xlabel = "Iteration", - ylabel = "A-priori error", - ylabelvisible = ig == 1, - yticksvisible = ig == 1, - yticklabelsvisible = ig == 1, - ) - ylims!(-0.05, 1.05) - lines!( - ax, - [Point2f(0, 1), Point2f(priortraining[ig, 1].hist[end][1], 1)]; - label = "No closure", - linestyle = :dash, - ) - for (ifil, Φ) in enumerate(params.filters) - label = Φ isa FaceAverage ? "FA" : "VA" - lines!(ax, priortraining[ig, ifil].hist; label) - end - # lines!( - # ax, - # [Point2f(0, 0), Point2f(priortraining[ig, 1].hist[end][1], 0)]; - # label = "DNS", - # linestyle = :dash, - # ) - end - axes = filter(x -> x isa Axis, fig.content) - linkaxes!(axes...) - Legend(fig[1, end+1], axes[1]) - figdir = joinpath(plotdir, "priortraining") - ispath(figdir) || mkpath(figdir) - save("$figdir/validationerror.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### A-posteriori training -# -# Train one set of CNN parameters for each -# projection order, filter type and grid size. -# Use the same batch selection random seed for each training setup. -# Save parameters to disk after each combination. -# Plot training progress (for a validation data batch). -# -# The time stepper `RKProject` allows for choosing when to project. 
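To make the projection choice concrete: both variants wrap the same base Runge-Kutta method, exactly as done in the training and error computations below via `RKProject(params.method, projectorder)`. `ProjectOrder.First` corresponds to the models labelled DIF in the figures, `ProjectOrder.Last` to DCF:

```julia
# Same constructor as used throughout this script.
method_dif = RKProject(params.method, ProjectOrder.First)  # "DIF" in the plots
method_dcf = RKProject(params.method, ProjectOrder.Last)   # "DCF" in the plots
```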
- -projectorders = ProjectOrder.First, ProjectOrder.Last - -# Train -let - dotrain = false - nepoch = 10 - dotrain && trainpost(; - params, - projectorders, - outdir, - plotdir, - taskid, - postseed = seeds.post, - dns_seeds_train, - dns_seeds_valid, - nsubstep = 5, - nunroll = 10, - ntrajectory = 5, - closure, - θ_start = θ_cnn_prior, - opt = Adam(T(1e-4)), - λ = T(5e-8), - scheduler = CosAnneal(; l0 = T(1e-6), l1 = T(1e-4), period = nepoch), - nunroll_valid = 50, - nupdate_callback = 10, - displayref = false, - displayupdates = true, - loadcheckpoint = false, - nepoch, - niter = 100, - ) -end - -# Load learned parameters and training times - -posttraining = loadpost(outdir, params.nles, params.filters, projectorders) -θ_cnn_post = map(p -> copyto!(copy(θ_start), p.θ), posttraining) -@info "" θ_cnn_post .|> extrema # Check that parameters are within reasonable bounds - -# Training times -map(p -> p.comptime, posttraining) ./ 60 -map(p -> p.comptime, posttraining) |> sum |> x -> x / 60 -map(p -> p.comptime, posttraining) |> x -> reshape(x, :, 2) .|> x -> round(x; digits = 1) - -# ## Plot a-posteriori training history - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (950, 400)) - for (iorder, projectorder) in enumerate(projectorders) - axes = [] - for (ig, nles) in enumerate(params.nles) - ax = Axis( - fig[iorder, ig]; - title = "n = $(nles)", - xlabel = "Iteration", - ylabel = projectorder == ProjectOrder.First ? "DIF" : "DCF", - ylabelvisible = ig == 1, - ylabelfont = :bold, - # yticksvisible = ig == 1, - # yticklabelsvisible = ig == 1, - # yscale = log10, - titlevisible = iorder == 1, - xlabelvisible = iorder == 2, - xticksvisible = iorder == 2, - xticklabelsvisible = iorder == 2, - ) - for (ifil, Φ) in enumerate(params.filters) - postfile = PaperDC.getpostfile(outdir, nles, Φ, projectorder) - checkfile = join(splitext(postfile), "_checkpoint") - check = namedtupleload(checkfile) - (; hist) = check.callbackstate - label = Φ isa FaceAverage ? "FA" : "VA" - lines!(ax, hist; color = Cycled(ifil + 1), label) - end - ig == 4 && iorder == 1 && axislegend(ax) - push!(axes, ax) - end - # linkaxes!(axes...) - end - # axes = filter(x -> x isa Axis, fig.content) - # linkaxes!(axes...) - # Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - Label(fig[0, :], "A-posteriori error"; valign = :bottom, font = :bold) - rowgap!(fig.layout, 10) - figdir = joinpath(plotdir, "posttraining") - ispath(figdir) || mkpath(figdir) - save("$figdir/validationerror.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### Train Smagorinsky model -# -# Use a-posteriori error grid search to determine the optimal Smagorinsky constant. -# Find one constant for each projection order, filter type, and grid size. 
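The search itself happens inside the `trainsmagorinsky` helper called below; conceptually it evaluates an a-posteriori error for every candidate constant in `θrange` and keeps the minimiser. A rough sketch of that idea (not the actual implementation), assuming `data`, `setup`, `psolver`, and `method` are prepared as in the a-posteriori error block further down:

```julia
# Sketch: pick the Smagorinsky constant with the smallest a-posteriori error.
err = create_relerr_post(;
    data, setup, psolver, method,
    closure_model = smagorinsky_closure(setup),
    nsubstep = 5,
)
θrange = range(T(0), T(0.3), 301)
θ_best = θrange[argmin(map(err, θrange))]
```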
- -let - dotrain = false - dotrain && trainsmagorinsky(; - params, - projectorders, - outdir, - dns_seeds_train, - taskid, - nunroll = 50, - nsubstep = 5, - ninfo = 50, - θrange = range(T(0), T(0.3), 301), - ) -end - -# Load trained parameters -smag = loadsmagorinsky(outdir, params.nles, params.filters, projectorders) - -# Extract coefficients -θ_smag = getfield.(smag, :θ) - -θ_smag |> x -> reshape(x, :, 4) - -# Computational time -getfield.(smag, :comptime) -getfield.(smag, :comptime) |> sum - -########################################################################## #src - -# ## Prediction errors - -# ### Compute a-priori errors -# -# Note that it is still interesting to compute the a-priori errors for the -# a-posteriori trained CNN. -let - eprior = (; - nomodel = ones(T, length(params.nles)), - prior = zeros(T, size(θ_cnn_prior)), - post = zeros(T, size(θ_cnn_post)), - ) - for (ifil, Φ) in enumerate(params.filters), (ig, nles) in enumerate(params.nles) - @info "Computing a-priori errors" Φ nles - - setup = getsetup(; params, nles) - data = map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_test) - testset = create_io_arrays(data, setup) - i = 1:100 - u, c = testset.u[:, :, :, i], testset.c[:, :, :, i] - testset = (u, c) |> device - err = create_relerr_prior(closure, testset...) - eprior.prior[ig, ifil] = err(device(θ_cnn_prior[ig, ifil])) - for iorder in eachindex(projectorders) - eprior.post[ig, ifil, iorder] = err(device(θ_cnn_post[ig, ifil, iorder])) - end - end - jldsave(joinpath(outdir, "eprior.jld2"); eprior...) -end -clean() - -eprior = namedtupleload(joinpath(outdir, "eprior.jld2")) - -########################################################################## #src - -# ### Compute a-posteriori errors - -let - sample = namedtupleload( - getdatafile(outdir, params.nles[1], params.filters[1], dns_seeds_test[1]), - ) - sample.t[100] -end - -let - s = (length(params.nles), length(params.filters), length(projectorders)) - epost = (; - nomodel = zeros(T, s), - smag = zeros(T, s), - cnn_prior = zeros(T, s), - cnn_post = zeros(T, s), - ) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (ig, nles) in enumerate(params.nles) - - @info "Computing a-posteriori errors" projectorder Φ nles - I = CartesianIndex(ig, ifil, iorder) - setup = getsetup(; params, nles) - psolver = psolver_spectral(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - it = 1:100 - data = (; - u = selectdim(sample.u, ndims(sample.u), it) |> collect |> device, - t = sample.t[it], - ) - nsubstep = 5 - method = RKProject(params.method, projectorder) - ## No model - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = nothing, - nsubstep, - ) - epost.nomodel[I] = err(nothing) - ## Smagorinsky - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = smagorinsky_closure(setup), - nsubstep, - ) - epost.smag[I] = err(θ_smag[I]) - ## CNN - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = wrappedclosure(closure, setup), - nsubstep, - ) - epost.cnn_prior[I] = err(device(θ_cnn_prior[ig, ifil])) - epost.cnn_post[I] = err(device(θ_cnn_post[I])) - clean() - end - jldsave(joinpath(outdir, "epost.jld2"); epost...) 
-end - -epost = namedtupleload(joinpath(outdir, "epost.jld2")) - -epost.nomodel -epost.smag -epost.cnn_prior -epost.cnn_post - -########################################################################## #src - -# ### Plot a-priori errors - -# Better for PDF export -CairoMakie.activate!() - -with_theme(; palette) do - fig = Figure(; size = (800, 300)) - axes = [] - for (ifil, Φ) in enumerate(params.filters) - ax = Axis( - fig[1, ifil]; - xscale = log10, - xticks = params.nles, - xlabel = "Resolution", - # title = "Relative a-priori error $(ifil == 1 ? " (FA)" : " (VA)")", - # title = "$(Φ isa FaceAverage ? "FA" : "VA")", - title = "$(Φ isa FaceAverage ? "Face-averaging" : "Volume-averaging")", - ylabel = "A-priori error", - ylabelvisible = ifil == 1, - yticksvisible = ifil == 1, - yticklabelsvisible = ifil == 1, - ) - for (e, marker, label, color) in [ - (eprior.nomodel, :circle, "No closure", Cycled(1)), - (eprior.prior[:, ifil], :utriangle, "CNN (prior)", Cycled(2)), - (eprior.post[:, ifil, 1], :rect, "CNN (post, DIF)", Cycled(3)), - (eprior.post[:, ifil, 2], :diamond, "CNN (post, DCF)", Cycled(4)), - ] - scatterlines!(params.nles, e; marker, color, label) - end - # axislegend(; position = :lb) - ylims!(ax, (T(-0.05), T(1.05))) - push!(axes, ax) - end - Legend(fig[1, end+1], axes[1]) - save("$plotdir/eprior.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### Plot a-posteriori errors - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (800, 300)) - linestyles = [:solid, :dash] - for (iorder, projectorder) in enumerate(projectorders) - lesmodel = iorder == 1 ? "DIF" : "DCF" - (; nles) = params - ax = Axis( - fig[1, iorder]; - xscale = log10, - yscale = log10, - xticks = nles, - xlabel = "Resolution", - title = "$lesmodel", - ylabel = "A-posteriori error", - ylabelvisible = iorder == 1, - yticksvisible = iorder == 1, - yticklabelsvisible = iorder == 1, - ) - for (e, marker, label, color) in [ - (epost.nomodel, :circle, "No closure", Cycled(1)), - (epost.smag, :utriangle, "Smagorinsky", Cycled(2)), - (epost.cnn_prior, :rect, "CNN (Lprior)", Cycled(3)), - (epost.cnn_post, :diamond, "CNN (Lpost)", Cycled(4)), - ] - for (ifil, linestyle) in enumerate(linestyles) - ifil == 2 && (label = nothing) - scatterlines!(nles, e[:, ifil, iorder]; color, linestyle, marker, label) - end - end - # ylims!(ax, (T(0.025), T(1.00))) - end - linkaxes!(filter(x -> x isa Axis, fig.content)...) - g = GridLayout(fig[1, end+1]) - Legend(g[1, 1], filter(x -> x isa Axis, fig.content)[1]; valign = :bottom) - Legend( - g[2, 1], - map(s -> LineElement(; color = :black, linestyle = s), linestyles), - ["FA", "VA"]; - orientation = :horizontal, - valign = :top, - ) - rowsize!(g, 1, Relative(1 / 2)) - # rowgap!(g, 0) - # Legend(fig[1, end + 1], filter(x -> x isa Axis, fig.content)[1]) - # Legend( - # fig[end+1, :], - # filter(x -> x isa Axis, fig.content)[1]; - # orientation = :horizontal, - # ) - save("$plotdir/epost.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ## Energy evolution - -# ### Compute total kinetic energy as a function of time - -let - s = length(params.nles), length(params.filters), length(projectorders) - keys = [:ref, :nomodel, :smag, :cnn_prior, :cnn_post] - divergencehistory = (; map(k -> k => fill(Point2f[], s), keys)...) - energyhistory = (; map(k -> k => fill(Point2f[], s), keys)...) 
- for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (ig, nles) in enumerate(params.nles) - - I = CartesianIndex(ig, ifil, iorder) - @info "Computing divergence and kinetic energy" projectorder Φ nles - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - ustart = selectdim(sample.u, ndims(sample.u), 1) |> collect |> device - T = eltype(ustart) - - # Shorter time for DIF - t_DIF = T(1) - - # Reference trajectories - divergencehistory.ref[I] = let - div = scalarfield(setup) - udev = vectorfield(setup) - it = 1:5:length(sample.t) - map(it) do it - t = sample.t[it] - u = selectdim(sample.u, ndims(sample.u), it) |> collect - copyto!(udev, u) - IncompressibleNavierStokes.divergence!(div, udev, setup) - d = view(div, setup.grid.Ip) - d = sum(abs2, d) / length(d) - d = sqrt(d) - Point2f(t, d) - end - end - energyhistory.ref[I] = let - it = 1:5:length(sample.t) - udev = vectorfield(setup) - map(it) do it - t = sample.t[it] - u = selectdim(sample.u, ndims(sample.u), it) |> collect - copyto!(udev, u) - Point2f(t, total_kinetic_energy(udev, setup)) - end - end - - nupdate = 5 - writer = processor() do state - div = scalarfield(setup) - dhist = Point2f[] - ehist = zeros(Point2f, 0) - on(state) do (; u, t, n) - if n % nupdate == 0 - IncompressibleNavierStokes.divergence!(div, u, setup) - d = view(div, setup.grid.Ip) - d = sum(abs2, d) / length(d) - d = sqrt(d) - push!(dhist, Point2f(t, d)) - e = total_kinetic_energy(u, setup) - push!(ehist, Point2f(t, e)) - end - end - state[] = state[] # Compute initial divergence - (; dhist, ehist) - end - - for (sym, closure_model, θ) in [ - (:nomodel, nothing, nothing), - (:smag, smagorinsky_closure(setup), θ_smag[I]), - (:cnn_prior, wrappedclosure(closure, setup), device(θ_cnn_prior[ig, ifil])), - (:cnn_post, wrappedclosure(closure, setup), device(θ_cnn_post[I])), - ] - _, results = solve_unsteady(; - setup = (; setup..., closure_model), - ustart, - tlims = ( - sample.t[1], - projectorder == ProjectOrder.First ? 
t_DIF : sample.t[end], - ), - Δt_min = T(1e-5), - method = RKProject(params.method, projectorder), - processors = (; writer, logger = timelogger(; nupdate = 500)), - psolver, - θ, - ) - divergencehistory[sym][I] = results.writer.dhist - energyhistory[sym][I] = results.writer.ehist - end - end - jldsave(joinpath(outdir, "history.jld2"); energyhistory, divergencehistory) - clean() -end - -(; divergencehistory, energyhistory) = namedtupleload(joinpath(outdir, "history.jld2")); - -########################################################################## #src - -# Check that energy is within reasonable bounds -energyhistory.ref .|> extrema -energyhistory.nomodel .|> extrema -energyhistory.smag .|> extrema -energyhistory.cnn_prior .|> extrema -energyhistory.cnn_post .|> extrema - -# Check that divergence is within reasonable bounds -divergencehistory.ref .|> extrema -divergencehistory.nomodel .|> extrema -divergencehistory.smag .|> extrema -divergencehistory.cnn_prior .|> extrema -divergencehistory.cnn_post .|> extrema - -########################################################################## #src - -# ### Plot energy evolution - -with_theme(; palette) do - doplot() || return - for (igrid, nles) in enumerate(params.nles) - @info "Plotting energy evolution" nles - fig = Figure(; size = (800, 450)) - g = GridLayout(fig[1, 1]) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters) - - I = CartesianIndex(igrid, ifil, iorder) - subfig = g[ifil, iorder] - ax = Axis( - subfig; - # xscale = log10, - # yscale = log10, - xlabel = "t", - # ylabel = Φ isa FaceAverage ? "Face-average" : "Volume-average", - ylabel = "E(t)", - # ylabelfont = :bold, - title = projectorder == ProjectOrder.First ? "DIF" : "DCF", - titlevisible = ifil == 1, - xlabelvisible = ifil == 2, - xticksvisible = ifil == 2, - xticklabelsvisible = ifil == 2, - ylabelvisible = iorder == 1, - yticksvisible = iorder == 1, - yticklabelsvisible = iorder == 1, - ) - # xlims!(ax, (1e-2, 5.0)) - # xlims!(ax, (0.0, 1.0)) - # ylims!(ax, (1.3, 2.3)) - plots = [ - (energyhistory.nomodel, :solid, 1, "No closure"), - (energyhistory.smag, :solid, 2, "Smagorinsky"), - (energyhistory.cnn_prior, :solid, 3, "CNN (prior)"), - (energyhistory.cnn_post, :solid, 4, "CNN (post)"), - (energyhistory.ref, :dash, 1, "Reference"), - ] - for (p, linestyle, i, label) in plots - lines!(ax, p[I]; color = Cycled(i), linestyle, label) - iorder == 1 && xlims!(-0.05, 1.05) - # iorder == 1 && ylims!(1.1, 3.1) - ylims!(1.3, 3.0) - end - - # Plot zoom-in box - if iorder == 2 - tlims = iorder == 1 ? (0.05, 0.2) : (0.8, 1.2) - i1 = findfirst(p -> p[1] > tlims[1], energyhistory.ref[I]) - i2 = findfirst(p -> p[1] > tlims[2], energyhistory.ref[I]) - tlims = energyhistory.ref[I][i1][1], energyhistory.ref[I][i2][1] - klims = energyhistory.ref[I][i1][2], energyhistory.ref[I][i2][2] - dk = klims[2] - klims[1] - # klims = klims[1] - 0.2 * dk, klims[2] + 0.2 * dk - w = iorder == 1 ? 
0.2 : 0.1 - klims = klims[1] + w * dk, klims[2] - w * dk - box = [ - Point2f(tlims[1], klims[1]), - Point2f(tlims[2], klims[1]), - Point2f(tlims[2], klims[2]), - Point2f(tlims[1], klims[2]), - Point2f(tlims[1], klims[1]), - ] - lines!(ax, box; color = :black) - ax2 = Axis( - subfig; - # bbox = BBox(0.8, 0.9, 0.2, 0.3), - width = Relative(0.35), - height = Relative(0.35), - halign = 0.05, - valign = 0.95, - limits = (tlims..., klims...), - xscale = log10, - yscale = log10, - xticksvisible = false, - xticklabelsvisible = false, - xgridvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ygridvisible = false, - backgroundcolor = :white, - ) - # https://discourse.julialang.org/t/makie-inset-axes-and-their-drawing-order/60987/5 - translate!(ax2.scene, 0, 0, 10) - translate!(ax2.elements[:background], 0, 0, 9) - for (p, linestyle, i, label) in plots - lines!(ax2, p[igrid, ifil, iorder]; color = Cycled(i), linestyle, label) - end - end - - Label( - g[ifil, 0], - # Φ isa FaceAverage ? "Face-average" : "Volume-average"; - Φ isa FaceAverage ? "FA" : "VA"; - # halign = :right, - font = :bold, - # rotation = pi/2, - tellheight = false, - ) - end - colgap!(g, 10) - rowgap!(g, 10) - # colsize!(g, 1, Relative(1 / 5)) - Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - name = "$plotdir/energy_evolution/" - ispath(name) || mkpath(name) - save("$(name)/nles=$(nles).pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ### Plot Divergence - -with_theme(; palette) do - doplot() || return - islog = true - for (igrid, nles) in enumerate(params.nles) - @info "Plotting divergence" nles - fig = Figure(; size = (800, 450)) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters) - - I = CartesianIndex(igrid, ifil, iorder) - subfig = fig[ifil, iorder] - ax = Axis( - subfig; - yscale = islog ? log10 : identity, - xlabel = "t", - ylabel = Φ isa FaceAverage ? "Face-average" : "Volume-average", - ylabelfont = :bold, - title = projectorder == ProjectOrder.First ? 
"DIF" : "DCF", - titlevisible = ifil == 1, - xlabelvisible = ifil == 2, - xticksvisible = ifil == 2, - xticklabelsvisible = ifil == 2, - ylabelvisible = iorder == 1, - yticksvisible = iorder == 1, - yticklabelsvisible = iorder == 1, - ) - lines!(ax, divergencehistory.nomodel[I]; label = "No closure") - lines!(ax, divergencehistory.smag[I]; label = "Smagorinsky") - lines!(ax, divergencehistory.cnn_prior[I]; label = "CNN (prior)") - lines!(ax, divergencehistory.cnn_post[I]; label = "CNN (post)") - lines!( - ax, - divergencehistory.ref[I]; - color = Cycled(1), - linestyle = :dash, - label = "Reference", - ) - islog && ylims!(ax, (T(1e-6), T(1e3))) - iorder == 1 && xlims!(ax, (-0.05, 1.05)) - end - rowgap!(fig.layout, 10) - colgap!(fig.layout, 10) - Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - name = "$plotdir/divergence/" - ispath(name) || mkpath(name) - save("$(name)/nles=$(nles).pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ## Solutions at different times - -let - s = length(params.nles), length(params.filters), length(projectorders) - temp = zeros(T, ntuple(Returns(0), params.D + 1)) - keys = [:ref, :nomodel, :smag, :cnn_prior, :cnn_post] - times = T[0.1, 0.5, 1.0, 5.0] - itime_max_DIF = 3 - times_exact = copy(times) - utimes = map(t -> (; map(k -> k => fill(temp, s), keys)...), times) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (igrid, nles) in enumerate(params.nles) - - @info "Computing test solutions" projectorder Φ nles - I = CartesianIndex(igrid, ifil, iorder) - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - ustart = selectdim(sample.u, ndims(sample.u), 1) |> collect - t = sample.t - solve(ustart, tlims, closure_model, θ) = - solve_unsteady(; - setup = (; setup..., closure_model), - ustart = device(ustart), - tlims, - method = RKProject(params.method, projectorder), - psolver, - θ, - )[1].u |> Array - t1 = t[1] - for i in eachindex(times) - # Only first times for First - i > itime_max_DIF && projectorder == ProjectOrder.First && continue - - # Time interval - t0 = t1 - t1 = times[i] - - # Adjust t1 to be exactly on a reference time - it = findfirst(>(t1), t) - if isnothing(it) - # Not found: Final time - it = length(t) - end - t1 = t[it] - tlims = (t0, t1) - times_exact[i] = t1 - - getprev(i, sym) = i == 1 ? ustart : utimes[i-1][sym][I] - - # Compute fields - utimes[i].ref[I] = selectdim(sample.u, ndims(sample.u), it) |> collect - utimes[i].nomodel[I] = solve(getprev(i, :nomodel), tlims, nothing, nothing) - utimes[i].smag[I] = - solve(getprev(i, :smag), tlims, smagorinsky_closure(setup), θ_smag[I]) - utimes[i].cnn_prior[I] = solve( - getprev(i, :cnn_prior), - tlims, - wrappedclosure(closure, setup), - device(θ_cnn_prior[igrid, ifil]), - ) - utimes[i].cnn_post[I] = solve( - getprev(i, :cnn_post), - tlims, - wrappedclosure(closure, setup), - device(θ_cnn_post[I]), - ) - end - clean() - end - jldsave("$outdir/solutions.jld2"; u = utimes, t = times_exact, itime_max_DIF) -end; -clean(); - -# Load solution -solutions = namedtupleload("$outdir/solutions.jld2"); - -########################################################################## #src - -# ### Plot spectra -# -# Plot kinetic energy spectra. 
- -with_theme(; palette) do - doplot() || return - for (ifil, Φ) in enumerate(params.filters), (igrid, nles) in enumerate(params.nles) - @info "Plotting spectra" Φ nles - fig = Figure(; size = (800, 450)) - fil = Φ isa FaceAverage ? "face-average" : "volume-average" - setup = getsetup(; params, nles) - (; Ip) = setup.grid - (; inds, κ, K) = IncompressibleNavierStokes.spectral_stuff(setup) - kmax = maximum(κ) - allaxes = [] - for (iorder, projectorder) in enumerate(projectorders) - rowaxes = [] - for (itime, t) in enumerate(solutions.t) - # Only first time for First - projectorder == ProjectOrder.First && - itime > solutions.itime_max_DIF && - continue - - fields = map( - k -> solutions.u[itime][k][igrid, ifil, iorder] |> device, - [:ref, :nomodel, :smag, :cnn_prior, :cnn_post], - ) - specs = map(fields) do u - state = (; u) - spec = observespectrum(state; setup) - spec.ehat[] - end - ## Build inertial slope above energy - logkrange = [0.45 * log(kmax), 0.85 * log(kmax)] - krange = exp.(logkrange) - slope, slopelabel = -T(3), L"$\kappa^{-3}$" - slopeconst = maximum(specs[1] ./ κ .^ slope) - offset = 3 - inertia = offset .* slopeconst .* krange .^ slope - ## Nice ticks - logmax = round(Int, log2(kmax + 1)) - # xticks = T(2) .^ (0:logmax) - if logmax > 5 - xticks = T[1, 4, 16, 64, 256] - else - xticks = T[1, 2, 4, 8, 16, 32] - end - ## Make plot - irow = projectorder == ProjectOrder.First ? 2 : 1 - subfig = fig[irow, itime] - ax = Axis( - subfig; - xticks, - xlabel = "κ", - xlabelvisible = irow == 2, - xticksvisible = irow == 2, - xticklabelsvisible = irow == 2, - ylabel = projectorder == ProjectOrder.First ? "DIF" : "DCF", - ylabelfont = :bold, - ylabelvisible = itime == 1, - yticksvisible = itime == 1, - yticklabelsvisible = itime == 1, - xscale = log2, - yscale = log10, - limits = (1, kmax, T(1e-8), T(1)), - title = irow == 1 ? 
"t = $(round(t; digits = 1))" : "", - ) - - # Plot zoom-in box - k1, k2 = klims = extrema(κ) - center = 0.8 - dk = 0.025 - logklims = (center - dk) * log(k2), (center + dk) * log(k2) - k1, k2 = klims = exp.(logklims) - i1 = findfirst(>(k1), κ) - i2 = findfirst(>(k2), κ) - elims = specs[1][i1], specs[1][i2] - loge1, loge2 = log.(elims) - de = (loge1 - loge2) * 0.05 - logelims = loge1 + de, loge2 - de - elims = exp.(logelims) - box = [ - Point2f(klims[1], elims[1]), - Point2f(klims[2], elims[1]), - Point2f(klims[2], elims[2]), - Point2f(klims[1], elims[2]), - Point2f(klims[1], elims[1]), - ] - ax_zoom = Axis( - subfig; - width = Relative(0.45), - height = Relative(0.4), - halign = 0.05, - valign = 0.05, - limits = (klims..., reverse(elims)...), - xscale = log10, - yscale = log10, - xticksvisible = false, - xticklabelsvisible = false, - xgridvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ygridvisible = false, - backgroundcolor = :white, - ) - # https://discourse.julialang.org/t/makie-inset-axes-and-their-drawing-order/60987/5 - translate!(ax_zoom.scene, 0, 0, 10) - translate!(ax_zoom.elements[:background], 0, 0, 9) - - # Plot lines in both axes - for ax in (ax, ax_zoom) - lines!(ax, κ, specs[2]; color = Cycled(1), label = "No model") - lines!(ax, κ, specs[3]; color = Cycled(2), label = "Smagorinsky") - lines!(ax, κ, specs[4]; color = Cycled(3), label = "CNN (prior)") - lines!(ax, κ, specs[5]; color = Cycled(4), label = "CNN (post)") - lines!( - ax, - κ, - specs[1]; - color = Cycled(1), - linestyle = :dash, - label = "Reference", - ) - lines!( - ax, - krange, - inertia; - color = Cycled(1), - label = slopelabel, - linestyle = :dot, - ) - end - - # Show box in main plot - lines!(ax, box; color = :black) - - # axislegend(ax; position = :lb) - autolimits!(ax) - - push!(allaxes, ax) - push!(rowaxes, ax) - end - linkaxes!(rowaxes...) - end - # linkaxes!(allaxes...) - # linkaxes!(filter(x -> x isa Axis, fig.content)...) - Legend( - fig[2, solutions.itime_max_DIF+1:end], - fig.content[1]; - tellwidth = false, - tellheight = false, - # width = Auto(false), - # height = Auto(false), - # halign = :left, - # framevisible = false, - ) - Label( - fig[0, 1:end], - "Energy spectra ($fil, n = $nles)"; - valign = :bottom, - font = :bold, - ) - rowgap!(fig.layout, 10) - colgap!(fig.layout, 10) - # ylims!(ax, (T(1e-3), T(0.35))) - specdir = "$plotdir/spectra/" - ispath(specdir) || mkpath(specdir) - name = splatfileparts(; filter = Φ, nles) - save("$specdir/$name.pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ### Plot fields - -with_theme(; palette) do - doplot() || return - ## Reference box for eddy comparison - x1 = 0.3 - x2 = 0.5 - y1 = 0.5 - y2 = 0.7 - box = [ - Point2f(x1, y1), - Point2f(x2, y1), - Point2f(x2, y2), - Point2f(x1, y2), - Point2f(x1, y1), - ] - for (ifil, Φ) in enumerate(params.filters) - Φ isa FaceAverage || continue - # fig = Figure(; size = (710, 400)) - fig = Figure(; size = (770, 360)) - irow = 0 - itime = 3 - for (igrid, nles) in enumerate(params.nles) - itime == 1 && (nles in [32, 64] || continue) - itime == 3 && (nles in [64, 128] || continue) - # nles in [128, 256] || continue - irow += 1 - setup = getsetup(; params, nles) - (; Ip, xp) = setup.grid - xplot = xp[1][2:end-1], xp[2][2:end-1] - xplot = xplot .|> Array - # lesmodel = iorder == 1 ? 
"DIF" : "DCF" - # fig = fieldplot( - # (; u, temp = nothing, t = T(0)); - # setup, - # title, - # docolorbar = false, - # size = (500, 500), - # ) - - utime = solutions.u[itime] - icol = 0 - - for (u, title) in [ - (utime.nomodel[igrid, ifil, 2], "No closure"), - (utime.nomodel[igrid, ifil, 2], "Smagorinsky (DCF)"), - # (utime.cnn_prior[igrid, ifil, 2], "CNN (prior, DCF)"), - (utime.cnn_post[igrid, ifil, 1], "CNN (post, DIF)"), - (utime.cnn_post[igrid, ifil, 2], "CNN (post, DCF)"), - (utime.ref[igrid, ifil, 2], "Reference"), - ] - icol += 1 - u = device(u) - w = vorticity(u, setup) - w = interpolate_ω_p(w, setup) - w = w[Ip] |> Array - colorrange = get_lims(w) - ax = Axis( - fig[irow, icol]; - title, - xticksvisible = false, - xticklabelsvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ylabel = "n = $nles", - ylabelvisible = icol == 1, - titlevisible = irow == 1, - aspect = DataAspect(), - ) - heatmap!(ax, xplot..., w; colorrange) - lines!(ax, box; linewidth = 3, color = Cycled(2)) # Red in palette - end - end - fil = Φ isa FaceAverage ? "face-average" : "volume-average" - tlab = round(solutions.t[itime]; digits = 1) - Label(fig[0, 1:end], "Vorticity ($fil, t = $tlab)"; valign = :bottom, font = :bold) - colgap!(fig.layout, 10) - rowgap!(fig.layout, 10) - display(fig) - path = "$plotdir/les_fields" - ispath(path) || mkpath(path) - name = splatfileparts(; itime, filter = Φ) - name = joinpath(path, name) - fname = "$(name).png" - save(fname, fig) - end -end - -# Plot vorticity -let - doplot() || return - nles = 64 - sample = namedtupleload(getdatafile(outdir, nles, FaceAverage(), dns_seeds_test[1])) - setup = getsetup(; params, nles) - u = selectdim(sample.u, ndims(sample.u), 1) |> collect |> device - w = vorticity(u, setup) |> Array |> Observable - title = sample.t[1] |> string |> Observable - fig = heatmap(w; axis = (; title)) - for i = 1:5:1000 - u = selectdim(sample.u, ndims(sample.u), i) |> collect |> device - w[] = vorticity(u, setup) |> Array - title[] = "t = $(round(sample.t[i]; digits = 2))" - display(fig) - sleep(0.05) - end -end diff --git a/lib/PaperDC/postanalysis3D.jl b/lib/PaperDC/postanalysis3D.jl deleted file mode 100644 index f932efaab..000000000 --- a/lib/PaperDC/postanalysis3D.jl +++ /dev/null @@ -1,1359 +0,0 @@ -# # A-posteriori analysis: Large Eddy Simulation (2D) -# -# This script is used to generate results for the the paper [Agdestein2025](@citet). -# -# - Generate filtered DNS data -# - Train closure models -# - Compare filters, closure models, and projection orders -# -# The filtered DNS data is saved and can be loaded in a subesequent session. -# The learned CNN parameters are also saved. - -if false #src - include("src/PaperDC.jl") #src -end #src - -@info "Script started" - -# Color palette for consistent theme throughout paper -palette = (; color = ["#3366cc", "#cc0000", "#669900", "#ff9900"]) - -# Choose where to put output -basedir = haskey(ENV, "DEEPDIP") ? ENV["DEEPDIP"] : @__DIR__ -outdir = joinpath(basedir, "output", "kolmogorov3D") -plotdir = joinpath(outdir, "plots") -logdir = joinpath(outdir, "logs") -ispath(outdir) || mkpath(outdir) -ispath(plotdir) || mkpath(plotdir) -ispath(logdir) || mkpath(logdir) - -# Turn off plots for array jobs. 
-# If all the workers do this at the same time, one might -# error when saving the file at the same time -doplot() = true - -########################################################################## #src - -# ## Configure logger - -using PaperDC -using Dates - -# Write output to file, as the default SLURM file is not updated often enough -isslurm = haskey(ENV, "SLURM_JOB_ID") -if isslurm - jobid = parse(Int, ENV["SLURM_JOB_ID"]) - taskid = parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) - logfile = "job=$(jobid)_task=$(taskid)_$(Dates.now()).out" -else - taskid = nothing - logfile = "log_$(Dates.now()).out" -end -logfile = joinpath(logdir, logfile) -setsnelliuslogger(logfile) - -@info "# A-posteriori analysis: Forced turbulence (3D)" - -# ## Load packages - -@info "Loading packages" - -using Accessors -using Adapt -# using GLMakie -using CairoMakie -using CUDA -using IncompressibleNavierStokes -using IncompressibleNavierStokes.RKMethods -using JLD2 -using LaTeXStrings -using LinearAlgebra -using Lux -using LuxCUDA -using NeuralClosure -using NNlib -using Optimisers -using ParameterSchedulers -using Random -using SparseArrays - -########################################################################## #src - -# ## Random number seeds -# -# Use a new RNG with deterministic seed for each code "section" -# so that e.g. training batch selection does not depend on whether we -# generated fresh filtered DNS data or loaded existing one (the -# generation of which would change the state of a global RNG). -# -# Note: Using `rng = Random.default_rng()` twice seems to point to the -# same RNG, and mutating one also mutates the other. -# `rng = Xoshiro()` creates an independent copy each time. -# -# We define all the seeds here. - -seeds = (; - dns = 123, # Initial conditions - θ_start = 234, # Initial CNN parameters - prior = 345, # A-priori training batch selection - post = 456, # A-posteriori training batch selection -) - -########################################################################## #src - -# ## Hardware selection - -# Precision -T = Float32 - -# Device -if CUDA.functional() - ## For running on a CUDA compatible GPU - @info "Running on CUDA" - backend = CUDABackend() - CUDA.allowscalar(false) - device = x -> adapt(CuArray, x) - clean() = (GC.gc(); CUDA.reclaim()) -else - ## For running on CPU. - ## Consider reducing the sizes of DNS, LES, and CNN layers if - ## you want to test run on a laptop. - @warn "Running on CPU" - backend = CPU() - device = identity - clean() = nothing -end - -########################################################################## #src - -# ## Data generation -# -# Create filtered DNS data for training, validation, and testing. 
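Regarding the note above on random number seeds (`Random.default_rng()` versus `Xoshiro()`), a tiny standalone illustration of the behaviour described there; this is plain Julia semantics, independent of this script:

```julia
using Random
a = Random.default_rng()
b = Random.default_rng()
rand(a); rand(b)             # both calls advance the same task-local generator
x, y = Xoshiro(123), Xoshiro(123)
rand(x) == rand(y)           # true: independent generators with identical seeds
```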
- -# Parameters -params = (; - D = 3, - lims = (T(0), T(1)), - Re = T(2e3), - tburn = T(0.5), - tsim = T(3), - savefreq = 50, - ndns = 1024, - nles = [32, 64], - # filters = (FaceAverage(), VolumeAverage()), - filters = (FaceAverage(),), - backend, - icfunc = (setup, psolver, rng) -> random_field(setup, T(0); kp = 20, psolver, rng), - method = RKMethods.Wray3(; T), - bodyforce = (dim, x, y, z, t) -> (dim == 1) * 5 * sinpi(8 * y), - issteadybodyforce = true, - processors = (; log = timelogger(; nupdate = 100)), -) - -# DNS seeds -ntrajectory = 10 -dns_seeds = splitseed(seeds.dns, ntrajectory) -dns_seeds_test = dns_seeds[1:1] -# dns_seeds_valid = dns_seeds[2:2] -# dns_seeds_train = dns_seeds[3:end] -dns_seeds_valid = dns_seeds[2:2] -dns_seeds_train = dns_seeds[2:end] - -# Create data -docreatedata = false -docreatedata && createdata(; - params = (; - params..., - # Allocating a full vector field is too expensive for DNS - issteadybodyforce = false, - ), - seeds = dns_seeds, - outdir, - taskid, -) - -# 185 * 10 = 1850 snapshots -# 1850 / 64 = 28.9 batches - -# Computational time -docomp = false -docomp && let - comptime, datasize = 0.0, 0.0 - for seed in dns_seeds - comptime += load( - getdatafile(outdir, params.nles[1], params.filters[1], seed), - "comptime", - ) - end - for seed in dns_seeds, nles in params.nles, Φ in params.filters - data = namedtupleload(getdatafile(outdir, nles, Φ, seed)) - datasize += Base.summarysize(data) - end - @info "Data" comptime - @info "Data" comptime / 60 datasize * 1e-9 - clean() -end - -# LES setups -setups = map(nles -> getsetup(; params, nles), params.nles); - -########################################################################## #src - -# ## CNN closure model - -# All training sessions will start from the same θ₀ -# for a fair comparison. - -closure, θ_start = cnn(; - setup = setups[1], - radii = [2, 2, 2, 2, 2], - channels = [24, 24, 24, 24, params.D], - activations = [tanh, tanh, tanh, tanh, identity], - use_bias = [true, true, true, true, false], - rng = Xoshiro(seeds.θ_start), -); -closure.chain - -@info "Initialized CNN with $(length(θ_start)) parameters" - -# Give the CNN a test run -# Note: Data and parameters are stored on the CPU, and -# must be moved to the GPU before use (with `device`) -let - @info "CNN warm up run" - using NeuralClosure.Zygote - u = randn(T, 32, 32, 32, 3, 10) |> device - θ = θ_start |> device - closure(u, θ) - gradient(θ -> sum(closure(u, θ)), θ) - clean() -end - -########################################################################## #src - -# ## Training - -# ### A-priori training -# -# Train one set of CNN parameters for each of the filter types and grid sizes. -# Use the same batch selection random seed for each training setup. -# Save parameters to disk after each run. -# Plot training progress (for a validation data batch). 
- -# Parameter save files - -# 200 * 50 = 10_000 -# 28 * 400 = 11_200 - -# Train -let - dotrain = false - nepoch = 100 - # niter = 200 - niter = nothing - dotrain && trainprior(; - params, - priorseed = seeds.prior, - dns_seeds_train, - dns_seeds_valid, - taskid, - outdir, - plotdir, - closure, - θ_start, - # opt = AdamW(; eta = T(1.0e-3), lambda = T(5.0e-5)), - opt = Adam(T(1.0e-3)), - λ = T(5.0e-5), - # noiselevel = T(1e-3), - scheduler = CosAnneal(; l0 = T(1e-6), l1 = T(1e-3), period = nepoch), - nvalid = 64, - batchsize = 64, - displayref = true, - displayupdates = true, # Set to `true` if using CairoMakie - nupdate_callback = 10, - loadcheckpoint = false, - nepoch, - niter, - ) -end - -# Load learned parameters and training times -priortraining = loadprior(outdir, params.nles, params.filters) -θ_cnn_prior = map(p -> copyto!(copy(θ_start), p.θ), priortraining) -@info "" θ_cnn_prior .|> extrema # Check that parameters are within reasonable bounds - -# Training times -map(p -> p.comptime, priortraining) -map(p -> p.comptime, priortraining) |> vec .|> x -> round(x; digits = 1) -map(p -> p.comptime, priortraining) |> sum |> x -> x / 60 # Minutes - -# ## Plot training history - -with_theme(; palette) do - # Turn off for array jobs. - # If all the workers do this at the same time, one might - # error when saving the file at the same time - doplot() || return - fig = Figure(; size = (950, 250)) - for (ig, nles) in enumerate(params.nles) - ax = Axis( - fig[1, ig]; - title = "n = $(nles)", - xlabel = "Iteration", - ylabel = "A-priori error", - ylabelvisible = ig == 1, - yticksvisible = ig == 1, - yticklabelsvisible = ig == 1, - ) - ylims!(-0.05, 1.05) - lines!( - ax, - [Point2f(0, 1), Point2f(priortraining[ig, 1].hist[end][1], 1)]; - label = "No closure", - linestyle = :dash, - ) - for (ifil, Φ) in enumerate(params.filters) - label = Φ isa FaceAverage ? "FA" : "VA" - lines!(ax, priortraining[ig, ifil].hist; label) - end - # lines!( - # ax, - # [Point2f(0, 0), Point2f(priortraining[ig, 1].hist[end][1], 0)]; - # label = "DNS", - # linestyle = :dash, - # ) - end - axes = filter(x -> x isa Axis, fig.content) - linkaxes!(axes...) - Legend(fig[1, end+1], axes[1]) - figdir = joinpath(plotdir, "priortraining") - ispath(figdir) || mkpath(figdir) - save("$figdir/validationerror.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### A-posteriori training -# -# Train one set of CNN parameters for each -# projection order, filter type and grid size. -# Use the same batch selection random seed for each training setup. -# Save parameters to disk after each combination. -# Plot training progress (for a validation data batch). -# -# The time stepper `RKProject` allows for choosing when to project. 
- -projectorders = [ProjectOrder.Last] - -# Train -let - dotrain = false - nepoch = 10 - dotrain && trainpost(; - params, - projectorders, - outdir, - plotdir, - taskid, - postseed = seeds.post, - dns_seeds_train, - dns_seeds_valid, - nsubstep = 4, - nunroll = 3, - ntrajectory = 1, - closure, - θ_start = θ_cnn_prior, - opt = Adam(T(1e-4)), - λ = T(5e-8), - scheduler = CosAnneal(; l0 = T(1e-6), l1 = T(1e-4), period = nepoch), - nunroll_valid = 50, - nupdate_callback = 10, - displayref = false, - displayupdates = true, - loadcheckpoint = false, - nepoch, - niter = 10, - ) -end - -# x = namedtupleload(getdatafile(outdir, params.nles[1], params.filters[1], dns_seeds_train[1])) -# x.t[2] - x.t[1] - -# Load learned parameters and training times - -posttraining = loadpost(outdir, params.nles, params.filters, projectorders) -θ_cnn_post = map(p -> copyto!(copy(θ_start), p.θ), posttraining) -@info "" θ_cnn_post .|> extrema # Check that parameters are within reasonable bounds - -# Training times -map(p -> p.comptime, posttraining) ./ 60 -map(p -> p.comptime, posttraining) |> sum |> x -> x / 60 -map(p -> p.comptime, posttraining) |> x -> reshape(x, :, 2) .|> x -> round(x; digits = 1) - -# ## Plot a-posteriori training history - -with_theme(; palette) do - # Turn off for array jobs. - # If all the workers do this at the same time, one might - # error when saving the file at the same time - doplot() || return - fig = Figure(; size = (950, 400)) - for (iorder, projectorder) in enumerate(projectorders) - axes = [] - for (ig, nles) in enumerate(params.nles) - ax = Axis( - fig[iorder, ig]; - title = "n = $(nles)", - xlabel = "Iteration", - # ylabel = projectorder == ProjectOrder.First ? "DIF" : "DCF", - # ylabelvisible = ig == 1, - ylabelfont = :bold, - # yticksvisible = ig == 1, - # yticklabelsvisible = ig == 1, - # yscale = log10, - titlevisible = iorder == 1, - # xlabelvisible = iorder == 2, - # xticksvisible = iorder == 2, - # xticklabelsvisible = iorder == 2, - ) - for (ifil, Φ) in enumerate(params.filters) - postfile = PaperDC.getpostfile(outdir, nles, Φ, projectorder) - checkfile = join(splitext(postfile), "_checkpoint") - check = namedtupleload(checkfile) - (; hist) = check.callbackstate - label = Φ isa FaceAverage ? "FA" : "VA" - lines!(ax, hist; color = Cycled(ifil + 1), label) - end - ig == 4 && iorder == 1 && axislegend(ax) - push!(axes, ax) - end - # linkaxes!(axes...) - end - # axes = filter(x -> x isa Axis, fig.content) - # linkaxes!(axes...) - # Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - Label(fig[0, :], "A-posteriori error"; valign = :bottom, font = :bold) - rowgap!(fig.layout, 10) - figdir = joinpath(plotdir, "posttraining") - ispath(figdir) || mkpath(figdir) - save("$figdir/validationerror.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### Train Smagorinsky model -# -# Use a-posteriori error grid search to determine the optimal Smagorinsky constant. -# Find one constant for each projection order, filter type, and grid size. 
- -let - dotrain = false - dotrain && trainsmagorinsky(; - params, - projectorders, - outdir, - dns_seeds_train, - taskid, - nunroll = 50, - nsubstep = 5, - ninfo = 50, - θrange = range(T(0), T(0.3), 301), - ) -end - -# # Load trained parameters -# smag = loadsmagorinsky(outdir, params.nles, params.filters, projectorders) - -# # Extract coefficients -# θ_smag = getfield.(smag, :θ) - -# # Computational time -# getfield.(smag, :comptime) -# getfield.(smag, :comptime) |> sum - -θ_smag = fill(T(0.17), length(params.nles), length(params.filters), length(projectorders)) - -########################################################################## #src - -# ## Prediction errors - -# ### Compute a-priori errors -# -# Note that it is still interesting to compute the a-priori errors for the -# a-posteriori trained CNN. -let - eprior = (; - nomodel = ones(T, length(params.nles)), - prior = zeros(T, size(θ_cnn_prior)), - post = zeros(T, size(θ_cnn_post)), - ) - for (ifil, Φ) in enumerate(params.filters), (ig, nles) in enumerate(params.nles) - @info "Computing a-priori errors" Φ nles - - setup = getsetup(; params, nles) - data = map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_test) - testset = create_io_arrays(data, setup) - i = 1:100 - u, c = testset.u[:, :, :, :, i], testset.c[:, :, :, :, i] - testset = (u, c) |> device - err = create_relerr_prior(closure, testset...) - eprior.prior[ig, ifil] = err(device(θ_cnn_prior[ig, ifil])) - for iorder in eachindex(projectorders) - eprior.post[ig, ifil, iorder] = err(device(θ_cnn_post[ig, ifil, iorder])) - end - end - jldsave(joinpath(outdir, "eprior.jld2"); eprior...) -end -clean() - -eprior = namedtupleload(joinpath(outdir, "eprior.jld2")) - -########################################################################## #src - -# ### Compute a-posteriori errors - -let - sample = namedtupleload( - getdatafile(outdir, params.nles[1], params.filters[1], dns_seeds_test[1]), - ) - sample.t[20] -end - -let - s = (length(params.nles), length(params.filters), length(projectorders)) - epost = (; - nomodel = zeros(T, s), - smag = zeros(T, s), - cnn_prior = zeros(T, s), - cnn_post = zeros(T, s), - ) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (ig, nles) in enumerate(params.nles) - - @info "Computing a-posteriori errors" projectorder Φ nles - I = CartesianIndex(ig, ifil, iorder) - setup = getsetup(; params, nles) - psolver = psolver_spectral(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - it = 1:20 - data = (; - u = selectdim(sample.u, ndims(sample.u), it) |> collect |> device, - t = sample.t[it], - ) - nsubstep = 5 - method = RKProject(params.method, projectorder) - ## No model - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = nothing, - nsubstep, - ) - epost.nomodel[I] = err(nothing) - ## Smagorinsky - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = smagorinsky_closure(setup), - nsubstep, - ) - epost.smag[I] = err(θ_smag[I]) - ## CNN - err = create_relerr_post(; - data, - setup, - psolver, - method, - closure_model = wrappedclosure(closure, setup), - nsubstep, - ) - epost.cnn_prior[I] = err(device(θ_cnn_prior[ig, ifil])) - epost.cnn_post[I] = err(device(θ_cnn_post[I])) - clean() - end - jldsave(joinpath(outdir, "epost.jld2"); epost...) 
-end - -epost = namedtupleload(joinpath(outdir, "epost.jld2")) - -epost.nomodel -epost.smag -epost.cnn_prior -epost.cnn_post - -########################################################################## #src - -# ### Plot a-priori errors - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (500, 300)) - axes = [] - for (ifil, Φ) in enumerate(params.filters) - ax = Axis( - fig[1, ifil]; - xscale = log10, - xticks = params.nles, - xlabel = "Resolution", - # title = "Relative a-priori error $(ifil == 1 ? " (FA)" : " (VA)")", - # title = "$(Φ isa FaceAverage ? "FA" : "VA")", - title = "A-priori error", - # ylabelvisible = ifil == 1, - # yticksvisible = ifil == 1, - # yticklabelsvisible = ifil == 1, - ) - for (e, marker, label, color) in [ - (eprior.nomodel, :circle, "No closure", Cycled(1)), - (eprior.prior[:, ifil], :utriangle, "CNN (prior)", Cycled(2)), - (eprior.post[:, ifil, 1], :diamond, "CNN (post, DCF)", Cycled(3)), - ] - scatterlines!(params.nles, e; marker, color, label) - end - # axislegend(; position = :lb) - # ylims!(ax, (T(-0.05), T(1.05))) - push!(axes, ax) - end - Legend(fig[1, end+1], axes[1]) - save("$plotdir/eprior.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ### Plot a-posteriori errors - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (500, 300)) - # linestyles = [:solid, :dash] - linestyles = [:solid] - for (iorder, projectorder) in enumerate(projectorders) - lesmodel = projectorder == ProjectOrder.First ? "DIF" : "DCF" - (; nles) = params - ax = Axis( - fig[1, iorder]; - xscale = log10, - yscale = log10, - xticks = nles, - xlabel = "Resolution", - title = "A-posteriori error", - ) - for (e, marker, label, color) in [ - (epost.nomodel, :circle, "No closure", Cycled(1)), - (epost.smag, :utriangle, "Smagorinsky", Cycled(2)), - (epost.cnn_prior, :rect, "CNN (Lprior)", Cycled(3)), - (epost.cnn_post, :diamond, "CNN (Lpost)", Cycled(4)), - ] - for (ifil, linestyle) in enumerate(linestyles) - ifil == 2 && (label = nothing) - scatterlines!(nles, e[:, ifil, iorder]; color, linestyle, marker, label) - end - end - # axislegend(ax; position = :lb) - # ylims!(ax, (T(0.025), T(1.00))) - end - # g = GridLayout(fig[1, end+1]) - # Legend(g[1, 1], filter(x -> x isa Axis, fig.content)[1]; valign = :bottom) - # Legend( - # g[2, 1], - # map(s -> LineElement(; color = :black, linestyle = s), linestyles), - # ["FA", "VA"]; - # orientation = :horizontal, - # valign = :top, - # ) - # rowsize!(g, 1, Relative(1 / 2)) - # rowgap!(g, 0) - Legend(fig[1, end+1], filter(x -> x isa Axis, fig.content)[1]) - # Legend( - # fig[end+1, :], - # filter(x -> x isa Axis, fig.content)[1]; - # orientation = :horizontal, - # ) - save("$plotdir/epost.pdf", fig) - display(fig) -end - -# ## Plot both - -with_theme(; palette) do - doplot() || return - fig = Figure(; size = (800, 300)) - axes = [] - ifil = 1 - iorder = 1 - axprior = Axis( - fig[1, ifil]; - xscale = log10, - xticks = params.nles, - xlabel = "Resolution", - title = "A-priori error", - ) - for (e, marker, label, color) in [ - (eprior.nomodel, :circle, "No closure", Cycled(1)), - (eprior.prior[:, ifil], :rect, "CNN (prior)", Cycled(3)), - (eprior.post[:, ifil, 1], :diamond, "CNN (post)", Cycled(4)), - ] - scatterlines!(axprior, params.nles, e; marker, color, label) - end - axpost = Axis( - fig[1, 2]; - xscale = log10, - # yscale = log10, - xticks = params.nles, - xlabel = "Resolution", - title = "A-posteriori error", - ) - for (e, marker, 
label, color) in [ - (epost.nomodel, :circle, "No closure", Cycled(1)), - (epost.smag, :utriangle, "Smagorinsky", Cycled(2)), - (epost.cnn_prior, :rect, "CNN (Lprior)", Cycled(3)), - (epost.cnn_post, :diamond, "CNN (Lpost)", Cycled(4)), - ] - scatterlines!(axpost, params.nles, e[:, ifil, iorder]; color, marker, label) - end - # linkaxes!(axprior, axpost) - Legend(fig[1, end+1], axpost) - save("$plotdir/epriorandpost.pdf", fig) - display(fig) -end - -########################################################################## #src - -# ## Energy evolution - -# ### Compute total kinetic energy as a function of time - -let - s = length(params.nles), length(params.filters), length(projectorders) - keys = [:ref, :nomodel, :smag, :cnn_prior, :cnn_post] - divergencehistory = (; map(k -> k => fill(Point2f[], s), keys)...) - energyhistory = (; map(k -> k => fill(Point2f[], s), keys)...) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (ig, nles) in enumerate(params.nles) - - I = CartesianIndex(ig, ifil, iorder) - @info "Computing divergence and kinetic energy" projectorder Φ nles - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - ustart = selectdim(sample.u, ndims(sample.u), 1) |> collect |> device - T = eltype(ustart) - - # Shorter time for DIF - t_DIF = T(1) - nupdate = 1 - - # Reference trajectories - divergencehistory.ref[I] = let - div = scalarfield(setup) - udev = vectorfield(setup) - it = 1:nupdate:length(sample.t) - map(it) do it - t = sample.t[it] - u = selectdim(sample.u, ndims(sample.u), it) |> Array - copyto!(udev, u) - IncompressibleNavierStokes.divergence!(div, udev, setup) - d = view(div, setup.grid.Ip) - d = sum(abs2, d) / length(d) - d = sqrt(d) - Point2f(t, d) - end - end - energyhistory.ref[I] = let - it = 1:nupdate:length(sample.t) - udev = vectorfield(setup) - map(it) do it - t = sample.t[it] - u = selectdim(sample.u, ndims(sample.u), it) |> Array - copyto!(udev, u) - Point2f(t, total_kinetic_energy(udev, setup)) - end - end - - writer = processor() do state - div = scalarfield(setup) - dhist = Point2f[] - ehist = zeros(Point2f, 0) - on(state) do (; u, t, n) - if n % nupdate == 0 - IncompressibleNavierStokes.divergence!(div, u, setup) - d = view(div, setup.grid.Ip) - d = sum(abs2, d) / length(d) - d = sqrt(d) - push!(dhist, Point2f(t, d)) - e = total_kinetic_energy(u, setup) - push!(ehist, Point2f(t, e)) - end - end - state[] = state[] # Compute initial divergence - (; dhist, ehist) - end - - for (sym, closure_model, θ) in [ - (:nomodel, nothing, nothing), - (:smag, smagorinsky_closure(setup), θ_smag[I]), - (:cnn_prior, wrappedclosure(closure, setup), device(θ_cnn_prior[ig, ifil])), - (:cnn_post, wrappedclosure(closure, setup), device(θ_cnn_post[I])), - ] - _, results = solve_unsteady(; - setup = (; setup..., closure_model), - ustart, - tlims = ( - sample.t[1], - projectorder == ProjectOrder.First ? 
t_DIF : sample.t[end], - ), - Δt_min = T(1e-5), - method = RKProject(params.method, projectorder), - processors = (; writer, logger = timelogger(; nupdate = 500)), - psolver, - θ, - ) - divergencehistory[sym][I] = results.writer.dhist - energyhistory[sym][I] = results.writer.ehist - end - end - jldsave(joinpath(outdir, "history.jld2"); energyhistory, divergencehistory) - clean() -end - -(; divergencehistory, energyhistory) = namedtupleload(joinpath(outdir, "history.jld2")); - -########################################################################## #src - -# Check that energy is within reasonable bounds -energyhistory.ref .|> extrema -energyhistory.nomodel .|> extrema -energyhistory.smag .|> extrema -energyhistory.cnn_prior .|> extrema -energyhistory.cnn_post .|> extrema - -# Check that divergence is within reasonable bounds -divergencehistory.ref .|> extrema -divergencehistory.nomodel .|> extrema -divergencehistory.smag .|> extrema -divergencehistory.cnn_prior .|> extrema -divergencehistory.cnn_post .|> extrema - -########################################################################## #src - -# ### Plot energy evolution - -with_theme(; palette) do - doplot() || return - for (igrid, nles) in enumerate(params.nles) - @info "Plotting energy evolution" nles - fig = Figure(; size = (500, 300)) - g = GridLayout(fig[1, 1]) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters) - - I = CartesianIndex(igrid, ifil, iorder) - subfig = g[ifil, iorder] - ax = Axis( - subfig; - xlabel = "t", - ylabel = "E(t)", - title = "Kinetic energy", - titlevisible = ifil == 1, - ) - # xlims!(ax, (1e-2, 5.0)) - # xlims!(ax, (0.0, 1.0)) - # ylims!(ax, (1.3, 2.3)) - plots = [ - (energyhistory.nomodel, :solid, 1, "No closure"), - (energyhistory.smag, :solid, 2, "Smagorinsky"), - (energyhistory.cnn_prior, :solid, 3, "CNN (prior)"), - (energyhistory.cnn_post, :solid, 4, "CNN (post)"), - (energyhistory.ref, :dash, 1, "Reference"), - ] - for (p, linestyle, i, label) in plots - lines!(ax, p[I]; color = Cycled(i), linestyle, label) - # iorder == 1 && xlims!(-0.05, 1.05) - # iorder == 1 && ylims!(1.1, 3.1) - # ylims!(1.3, 3.0) - end - - # Plot zoom-in box - dobox = false - if dobox - tlims = (0.3, 0.4) - i1 = findfirst(p -> p[1] > tlims[1], energyhistory.ref[I]) - i2 = findfirst(p -> p[1] > tlims[2], energyhistory.ref[I]) - tlims = energyhistory.ref[I][i1][1], energyhistory.ref[I][i2][1] - klims = energyhistory.ref[I][i1][2], energyhistory.ref[I][i2][2] - dk = abs(klims[2] - klims[1]) - # klims = klims[1] - 0.2 * dk, klims[2] + 0.2 * dk - w = 0.2 - klims = minimum(klims) + w * dk, maximum(klims) - w * dk - box = [ - Point2f(tlims[1], klims[1]), - Point2f(tlims[2], klims[1]), - Point2f(tlims[2], klims[2]), - Point2f(tlims[1], klims[2]), - Point2f(tlims[1], klims[1]), - ] - lines!(ax, box; color = :black) - ax2 = Axis( - subfig; - # bbox = BBox(0.8, 0.9, 0.2, 0.3), - width = Relative(0.35), - height = Relative(0.35), - halign = 0.95, - valign = 0.95, - limits = (tlims..., klims...), - xscale = log10, - yscale = log10, - xticksvisible = false, - xticklabelsvisible = false, - xgridvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ygridvisible = false, - backgroundcolor = :white, - ) - # https://discourse.julialang.org/t/makie-inset-axes-and-their-drawing-order/60987/5 - translate!(ax2.scene, 0, 0, 10) - translate!(ax2.elements[:background], 0, 0, 9) - for (p, linestyle, i, label) in plots - lines!(ax2, p[igrid, ifil, iorder]; color = Cycled(i), linestyle, 
label) - end - end - - # Label( - # g[ifil, 0], - # # Φ isa FaceAverage ? "Face-average" : "Volume-average"; - # Φ isa FaceAverage ? "FA" : "VA"; - # # halign = :right, - # font = :bold, - # # rotation = pi/2, - # tellheight = false, - # ) - end - # colgap!(g, 10) - # rowgap!(g, 10) - # colsize!(g, 1, Relative(1 / 5)) - Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - name = "$plotdir/energy_evolution/" - ispath(name) || mkpath(name) - save("$(name)/nles=$(nles).pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ### Plot Divergence - -with_theme(; palette) do - doplot() || return - islog = true - for (igrid, nles) in enumerate(params.nles) - @info "Plotting divergence" nles - fig = Figure(; size = (500, 300)) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters) - - I = CartesianIndex(igrid, ifil, iorder) - subfig = fig[ifil, iorder] - ax = Axis( - subfig; - yscale = islog ? log10 : identity, - xlabel = "t", - # ylabel = Φ isa FaceAverage ? "Face-average" : "Volume-average", - # ylabelfont = :bold, - title = "Divergence", - # titlevisible = ifil == 1, - # xlabelvisible = ifil == 2, - # xticksvisible = ifil == 2, - # xticklabelsvisible = ifil == 2, - # ylabelvisible = iorder == 1, - # yticksvisible = iorder == 1, - # yticklabelsvisible = iorder == 1, - ) - lines!(ax, divergencehistory.nomodel[I]; label = "No closure") - lines!(ax, divergencehistory.smag[I]; label = "Smagorinsky") - lines!(ax, divergencehistory.cnn_prior[I]; label = "CNN (prior)") - lines!(ax, divergencehistory.cnn_post[I]; label = "CNN (post)") - lines!( - ax, - divergencehistory.ref[I]; - color = Cycled(1), - linestyle = :dash, - label = "Reference", - ) - islog && ylims!(ax, (T(1e-6), T(1e3))) - iorder == 1 && xlims!(ax, (-0.05, 1.05)) - end - rowgap!(fig.layout, 10) - colgap!(fig.layout, 10) - Legend(fig[:, end+1], filter(x -> x isa Axis, fig.content)[1]) - name = "$plotdir/divergence/" - ispath(name) || mkpath(name) - save("$(name)/nles=$(nles).pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ## Solutions at different times - -let - s = length(params.nles), length(params.filters), length(projectorders) - temp = zeros(T, ntuple(Returns(0), params.D + 1)) - keys = [:ref, :nomodel, :smag, :cnn_prior, :cnn_post] - times = T[0.1, 0.5, 1.0, 3.0] - itime_max_DIF = 3 - times_exact = copy(times) - utimes = map(t -> (; map(k -> k => fill(temp, s), keys)...), times) - for (iorder, projectorder) in enumerate(projectorders), - (ifil, Φ) in enumerate(params.filters), - (igrid, nles) in enumerate(params.nles) - - @info "Computing test solutions" projectorder Φ nles - I = CartesianIndex(igrid, ifil, iorder) - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - sample = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_test[1])) - ustart = sample.u[1] - ustart = selectdim(sample.u, ndims(sample.u), 1) |> collect - t = sample.t - solve(ustart, tlims, closure_model, θ) = - solve_unsteady(; - setup = (; setup..., closure_model), - ustart = device(ustart), - tlims, - method = RKProject(params.method, projectorder), - psolver, - θ, - )[1].u |> Array - t1 = t[1] - for i in eachindex(times) - # Only first times for First - i > itime_max_DIF && projectorder == ProjectOrder.First && continue - - # Time interval - t0 = t1 - t1 = times[i] - - # Adjust t1 to be exactly on a reference time - it = findfirst(>(t1), t) - if 
isnothing(it) - # Not found: Final time - it = length(t) - end - t1 = t[it] - tlims = (t0, t1) - times_exact[i] = t1 - - getprev(i, sym) = i == 1 ? ustart : utimes[i-1][sym][I] - - # Compute fields - utimes[i].ref[I] = selectdim(sample.u, ndims(sample.u), it) |> collect - utimes[i].nomodel[I] = solve(getprev(i, :nomodel), tlims, nothing, nothing) - utimes[i].smag[I] = - solve(getprev(i, :smag), tlims, smagorinsky_closure(setup), θ_smag[I]) - utimes[i].cnn_prior[I] = solve( - getprev(i, :cnn_prior), - tlims, - wrappedclosure(closure, setup), - device(θ_cnn_prior[igrid, ifil]), - ) - utimes[i].cnn_post[I] = solve( - getprev(i, :cnn_post), - tlims, - wrappedclosure(closure, setup), - device(θ_cnn_post[I]), - ) - end - clean() - end - jldsave("$outdir/solutions.jld2"; u = utimes, t = times_exact, itime_max_DIF) - clean() -end; - -# Load solution -solutions = namedtupleload("$outdir/solutions.jld2"); - -########################################################################## #src - -# ### Plot spectra -# -# Plot kinetic energy spectra. - -with_theme(; palette) do - doplot() || return - for (ifil, Φ) in enumerate(params.filters), (igrid, nles) in enumerate(params.nles) - @info "Plotting spectra" Φ nles - fig = Figure(; size = (800, 300)) - setup = getsetup(; params, nles) - (; Ip) = setup.grid - (; inds, κ, K) = IncompressibleNavierStokes.spectral_stuff(setup) - kmax = maximum(κ) - allaxes = [] - for (iorder, projectorder) in enumerate(projectorders) - rowaxes = [] - for (itime, t) in enumerate(solutions.t) - itime == 1 && continue # Skip first time - # Only first time for First - projectorder == ProjectOrder.First && - itime > solutions.itime_max_DIF && - continue - - fields = map( - k -> solutions.u[itime][k][igrid, ifil, iorder] |> device, - [:ref, :nomodel, :smag, :cnn_prior, :cnn_post], - ) - specs = map(fields) do u - state = (; u) - spec = observespectrum(state; setup) - spec.ehat[] - end - ## Build inertial slope above energy - logkrange = [0.55 * log(kmax), 0.85 * log(kmax)] - krange = exp.(logkrange) - # slope, slopelabel = -T(3), L"$\kappa^{-3}$" - slope, slopelabel = -T(5 / 3), L"$\kappa^{-5/3}$" - slopeconst = maximum(specs[1] ./ κ .^ slope) - offset = 0.4 - inertia = offset .* slopeconst .* krange .^ slope - ## Nice ticks - logmax = round(Int, log2(kmax + 1)) - # xticks = T(2) .^ (0:logmax) - if logmax > 5 - xticks = T[1, 4, 16, 64, 256] - else - xticks = T[1, 2, 4, 8, 16, 32] - end - ## Make plot - irow = projectorder == ProjectOrder.First ? 2 : 1 - icol = itime - 1 - subfig = fig[irow, icol] - ax = Axis( - subfig; - xticks, - xlabel = "κ", - xscale = log2, - yscale = log10, - yticksvisible = icol == 1, - yticklabelsvisible = icol == 1, - limits = (1, kmax, T(1e-8), T(1)), - title = irow == 1 ? 
"t = $(round(t; digits = 1))" : "", - ) - - # Plot zoom-in box - k1, k2 = klims = extrema(κ) - center = 0.8 - dk = 0.025 - logklims = (center - dk) * log(k2), (center + dk) * log(k2) - k1, k2 = klims = exp.(logklims) - i1 = findfirst(>(k1), κ) - i2 = findfirst(>(k2), κ) - elims = specs[1][i1], specs[1][i2] - loge1, loge2 = log.(elims) - de = (loge1 - loge2) * 0.05 - logelims = loge1 + de, loge2 - de - elims = exp.(logelims) - box = [ - Point2f(klims[1], elims[1]), - Point2f(klims[2], elims[1]), - Point2f(klims[2], elims[2]), - Point2f(klims[1], elims[2]), - Point2f(klims[1], elims[1]), - ] - ax_zoom = Axis( - subfig; - width = Relative(0.45), - height = Relative(0.4), - halign = 0.05, - valign = 0.05, - limits = (klims..., reverse(elims)...), - xscale = log10, - yscale = log10, - xticksvisible = false, - xticklabelsvisible = false, - xgridvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ygridvisible = false, - backgroundcolor = :white, - ) - # https://discourse.julialang.org/t/makie-inset-axes-and-their-drawing-order/60987/5 - translate!(ax_zoom.scene, 0, 0, 10) - translate!(ax_zoom.elements[:background], 0, 0, 9) - - # Plot lines in both axes - for ax in (ax, ax_zoom) - lines!(ax, κ, specs[2]; color = Cycled(1), label = "No model") - lines!(ax, κ, specs[3]; color = Cycled(2), label = "Smagorinsky") - lines!(ax, κ, specs[4]; color = Cycled(3), label = "CNN (prior)") - lines!(ax, κ, specs[5]; color = Cycled(4), label = "CNN (post)") - lines!( - ax, - κ, - specs[1]; - color = Cycled(1), - linestyle = :dash, - label = "Reference", - ) - lines!( - ax, - krange, - inertia; - color = Cycled(1), - label = slopelabel, - linestyle = :dot, - ) - end - - # Show box in main plot - lines!(ax, box; color = :black) - - # axislegend(ax; position = :lb) - autolimits!(ax) - - push!(allaxes, ax) - push!(rowaxes, ax) - end - linkaxes!(rowaxes...) - end - # linkaxes!(allaxes...) - # linkaxes!(filter(x -> x isa Axis, fig.content)...) - Legend( - fig[1, end+1], - fig.content[1]; - tellwidth = false, - tellheight = false, - # width = Auto(false), - # height = Auto(false), - # halign = :left, - # framevisible = false, - ) - Label(fig[0, 1:end], "Energy spectra"; valign = :bottom, font = :bold) - rowgap!(fig.layout, 10) - colgap!(fig.layout, 10) - # ylims!(ax, (T(1e-3), T(0.35))) - specdir = "$plotdir/spectra/" - ispath(specdir) || mkpath(specdir) - name = splatfileparts(; filter = Φ, nles) - save("$specdir/$name.pdf", fig) - display(fig) - end -end - -########################################################################## #src - -# ### Plot fields - -with_theme(; palette) do - doplot() || return - ## Reference box for eddy comparison - x1 = 0.3 - x2 = 0.5 - y1 = 0.5 - y2 = 0.7 - box = [ - Point2f(x1, y1), - Point2f(x2, y1), - Point2f(x2, y2), - Point2f(x1, y2), - Point2f(x1, y1), - ] - for (ifil, Φ) in enumerate(params.filters) - Φ isa FaceAverage || continue - # fig = Figure(; size = (710, 400)) - fig = Figure(; size = (770, 360)) - irow = 0 - itime = 3 - for (igrid, nles) in enumerate(params.nles) - itime == 1 && (nles in [32, 64] || continue) - itime == 3 && (nles in [64, 128] || continue) - # nles in [128, 256] || continue - irow += 1 - setup = getsetup(; params, nles) - (; Ip, xp) = setup.grid - xplot = xp[1][2:end-1], xp[2][2:end-1] - xplot = xplot .|> Array - # lesmodel = iorder == 1 ? 
"DIF" : "DCF" - # fig = fieldplot( - # (; u, temp = nothing, t = T(0)); - # setup, - # title, - # docolorbar = false, - # size = (500, 500), - # ) - - utime = solutions.u[itime] - icol = 0 - - for (u, title) in [ - (utime.nomodel[igrid, ifil, 2], "No closure"), - (utime.nomodel[igrid, ifil, 2], "Smagorinsky (DCF)"), - # (utime.cnn_prior[igrid, ifil, 2], "CNN (prior, DCF)"), - (utime.cnn_post[igrid, ifil, 1], "CNN (post, DIF)"), - (utime.cnn_post[igrid, ifil, 2], "CNN (post, DCF)"), - (utime.ref[igrid, ifil, 2], "Reference"), - ] - icol += 1 - u = device(u) - w = vorticity(u, setup) - w = interpolate_ω_p(w, setup) - w = w[Ip] |> Array - colorrange = get_lims(w) - ax = Axis( - fig[irow, icol]; - title, - xticksvisible = false, - xticklabelsvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ylabel = "n = $nles", - ylabelvisible = icol == 1, - titlevisible = irow == 1, - aspect = DataAspect(), - ) - heatmap!(ax, xplot..., w; colorrange) - lines!(ax, box; linewidth = 3, color = Cycled(2)) # Red in palette - end - end - fil = Φ isa FaceAverage ? "face-average" : "volume-average" - tlab = round(solutions.t[itime]; digits = 1) - Label(fig[0, 1:end], "Vorticity ($fil, t = $tlab)"; valign = :bottom, font = :bold) - colgap!(fig.layout, 10) - rowgap!(fig.layout, 10) - display(fig) - path = "$plotdir/les_fields" - ispath(path) || mkpath(path) - name = splatfileparts(; itime, filter = Φ) - name = joinpath(path, name) - fname = "$(name).png" - save(fname, fig) - end -end - -# Plot vorticity -let - doplot() || return - nles = 64 - sample = namedtupleload(getdatafile(outdir, nles, FaceAverage(), dns_seeds_test[1])) - setup = getsetup(; params, nles) - u = sample.u[1] |> device - w = vorticity(u, setup) |> Array |> Observable - title = sample.t[1] |> string |> Observable - fig = heatmap(w; axis = (; title)) - for i = 1:5:1000 - u = sample.u[i] |> device - w[] = vorticity(u, setup) |> Array - title[] = "t = $(round(sample.t[i]; digits = 2))" - display(fig) - sleep(0.05) - end -end diff --git a/lib/PaperDC/prioranalysis.jl b/lib/PaperDC/prioranalysis.jl deleted file mode 100644 index 4bd6df41e..000000000 --- a/lib/PaperDC/prioranalysis.jl +++ /dev/null @@ -1,467 +0,0 @@ -# # A-priori analysis: Filtered DNS (2D or 3D) -# -# This script is used to generate results for the the paper [Agdestein2025](@citet). 
-# -# - Generate filtered DNS data -# - Compute quantities for different filters - -@info "Script started" - -if false #src - include("src/PaperDC.jl") #src -end #src - -# Output directory -output = joinpath(@__DIR__, "output", "prioranalysis") -logdir = joinpath(output, "logs") -ispath(output) || mkpath(output) -ispath(logdir) || mkpath(logdir) - -# ## Configure logger - -using PaperDC -using Dates - -# Write output to file, as the default SLURM file is not updated often enough -isslurm = haskey(ENV, "SLURM_JOB_ID") -if isslurm - jobid = parse(Int, ENV["SLURM_JOB_ID"]) - logfile = "job=$(jobid)_$(Dates.now()).out" -else - logfile = "log_$(Dates.now()).out" -end -logfile = joinpath(logdir, logfile) -setsnelliuslogger(logfile) - -@info "# A-priori analysis: Filtered DNS (2D or 3D)" - -# ## Load packages - -using CairoMakie -using CUDA -# using GLMakie -using IncompressibleNavierStokes -using IncompressibleNavierStokes: apply_bc_u!, ode_method_cache -using JLD2 -using NeuralClosure -using PaperDC -using Printf -using Random - -# ## Hardware selection - -if CUDA.functional() - ## For running on GPU - CUDA.allowscalar(false) - backend = CUDABackend() - clean() = (GC.gc(); CUDA.reclaim()) # This seems to be needed to free up memory -else - ## For running on CPU - backend = CPU() - clean() = nothing -end - -# ## Setup - -# 2D configuration -case = let - T = Float64 - case = (; - T, - D = 2, - ndns = 4096, - Re = T(1e4), - kp = 20, - tlims = (T(0), T(1e0)), - docopy = true, - bodyforce = (dim, x, y, t) -> (dim == 1) * 5 * sinpi(8 * y), - issteadybodyforce = true, - nles = [32, 64, 128, 256], - filterdefs = [FaceAverage(), VolumeAverage()], - ) - (; case..., name = "D=$(case.D)_T=$(T)_Re=$(case.Re)_t=$(case.tlims[2])") -end - -# 3D configuration -case = let - T = Float32 - case = (; - T, - D = 3, - ndns = 1024, # Works on an 80GB H100 GPU. Use smaller n for less memory.
- Re = T(6e3), - kp = 20, - tlims = (T(0), T(1e0)), - bodyforce = (dim, x, y, z, t) -> (dim == 1) * 5 * sinpi(8 * y), - issteadybodyforce = true, - docopy = false, - nles = [32, 64, 128, 256], - filterdefs = [FaceAverage(), VolumeAverage()], - ) - (; case..., name = "D=$(case.D)_T=$(case.T)_Re=$(case.Re)_t=$(case.tlims[2])") -end - -casedir = joinpath(output, case.name) -ispath(casedir) || mkpath(casedir) - -# Setup -lims = case.T(0), case.T(1) -dns = let - setup = Setup(; - x = ntuple(α -> range(lims..., case.ndns + 1), case.D), - case.Re, - case.bodyforce, - case.issteadybodyforce, - backend, - ) - psolver = default_psolver(setup) - (; setup, psolver) -end; -filters = map(Iterators.product(case.nles, case.filterdefs)) do (nles, Φ) - compression = case.ndns ÷ nles - setup = Setup(; - x = ntuple(α -> range(lims..., nles + 1), case.D), - case.Re, - case.bodyforce, - case.issteadybodyforce, - backend, - ) - psolver = default_psolver(setup) - (; setup, Φ, compression, psolver) -end; - -# Create random initial conditions -rng = Xoshiro(12345) -ustart = random_field(dns.setup, case.T(0); case.kp, dns.psolver, rng); -clean() - -# Compute initial spectrum since we will overwrite ustart to save memory -specstart = let - state = (; u = ustart) - spec = observespectrum(state; dns.setup) - (; spec.κ, ehat = spec.ehat[]) -end -clean() - -# Save initial conditions -@info "Saving initial conditions" -save_object("$casedir/ustart.jld2", Array(ustart)) - -# Solve unsteady problem -@info "Starting time stepping" -state, outputs = let - method = RKMethods.Wray3(; case.T) - cache = ode_method_cache(method, dns.setup) - solve_unsteady(; - dns.setup, - ustart, - case.tlims, - case.docopy, # leave initial conditions unchanged, false to free up memory - method, - cache, - dns.psolver, - processors = ( - obs = observe_u( - dns.setup, - dns.psolver, - filters; - PF = cache.ku[1], - p = cache.p, - nupdate = 20, - ), - log = timelogger(; nupdate = 5), - ), - ) -end; -clean() - -# Save final velocity -@info "Saving final velocity" -save_object("$casedir/uend.jld2", Array(state.u)) - -# ## Plot 2D fields - -case.D == 2 && with_theme() do - @info "Plotting 2D fields" - (; T) = case - - ## Compute quantities - for fil in filters - apply_bc_u!(state.u, T(0), dns.setup) - Φu = fil.Φ(state.u, fil.setup, fil.compression) - apply_bc_u!(Φu, T(0), fil.setup) - Fv = momentum(Φu, nothing, T(0), fil.setup) - apply_bc_u!(Fv, T(0), fil.setup) - PFv = project(Fv, fil.setup; psolver = fil.psolver) - apply_bc_u!(PFv, T(0), fil.setup) - F = momentum(state.u, nothing, T(0), dns.setup) - apply_bc_u!(F, T(0), dns.setup) - PF = project(F, dns.setup; dns.psolver) - apply_bc_u!(PF, T(0), dns.setup) - ΦPF = fil.Φ(PF, fil.setup, fil.compression) - apply_bc_u!(ΦPF, T(0), fil.setup) - c = ΦPF - PFv - apply_bc_u!(c, T(0), fil.setup) - - ## Make plots - fields = [ - (ustart, dns.setup, "u₀"), - (c, fil.setup, "c(u)"), - (state.u, dns.setup, "u"), - (PF, dns.setup, "PF(u)"), - (Φu, fil.setup, "ū"), - (PFv, fil.setup, "P̄F̄(ū)"), - ] - fig = Figure(; size = (600, 450)) - for (I, field) in enumerate(fields) - f, setup, title = field - (; Ip, xp) = setup.grid - i, j = CartesianIndices((2, 3))[I].I - w = vorticity(f, setup) - # w = f[1] |> Array - w = w[Ip] |> Array - lims = get_lims(w) - xw = xp[1][Ip.indices[1]], xp[2][Ip.indices[2]] - xw = Array.(xw) - heatmap( - fig[i, j], - xw..., - w; - colorrange = lims, - axis = (; - title, - xticksvisible = false, - xticklabelsvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - aspect =
DataAspect(), - ), - ) - end - display(fig) - name = "$casedir/fields_filter=$(fil.Φ)_nles=$(fil.setup.grid.Np[1]).png" - save(name, fig; px_per_unit = 2) - end -end - -# ## Plot 3D fields - -# Contour plots in 3D only work with GLMakie. -# For using GLMakie on headless servers, see -# -# GLMakie.activate!() - -# Make plots -dovolumeplot = false && D == 3 -dovolumeplot && with_theme() do - @info "Plotting 3D fields" - function makeplot(field, setup, name) - name = "$casedir/$name.png" - save( - name, - fieldplot( - (; u = field, t = T(0)); - setup, - fieldname = :eig2field, - levels = LinRange(T(4), T(12), 10), - docolorbar = false, - size = (600, 600), - ), - ) - try - ## Trim whitespace with ImageMagick - run(`magick $name -trim $name`) - catch e - @warn """ - ImageMagick not found. - Skipping image trimming. - Install from . - """ - end - end - makeplot(u₀, dns.setup, "start") # Requires docopy = true in solve - makeplot(state.u, dns.setup, "end") - i = 3 - makeplot( - filters[i].Φ(state.u, filters[i].setup, filters[i].compression), - filters[i].setup, - "end_filtered", - ) -end - -# ## Compute average quantities - -open("$casedir/averages.txt", "w") do io - println(io, "Φ\t\tM\tDu\tPv\tPc\tc\tE") - for o in outputs.obs - nt = length(o.t) - Dv = sum(o.Dv) / nt - Pc = sum(o.Pc) / nt - Pv = sum(o.Pv) / nt - c = sum(o.c) / nt - E = sum(o.E) / nt - @printf( - io, - "%s\t%d^%d\t%.2g\t%.2g\t%.2g\t%.2g\t%.2g\n", - ## "%s &\t\$%d^%d\$ &\t\$%.2g\$ &\t\$%.2g\$ &\t\$%.2g\$ &\t\$%.2g\$ &\t\$%.2g\$\n", - typeof(o.Φ), - o.Mα, - case.D, - Dv, - Pv, - Pc, - c, - E, - ) - end -end - -# ## Plot spectra - -let - fields = [state.u, map(f -> f.Φ(state.u, f.setup, f.compression), filters)...] - setups = [dns.setup, getfield.(filters, :setup)...] - specs = map(fields, setups) do u, setup - clean() # Free up memory - state = (; u) - spec = observespectrum(state; setup) - (; spec.κ, ehat = spec.ehat[]) - end - pushfirst!(specs, specstart) - save_object("$casedir/spectra.jld2", specs) -end - -specs = load_object("$casedir/spectra.jld2") - -with_theme(; palette = (; color = ["#3366cc", "#cc0000", "#669900", "#ff9900"])) do - (; D, T) = case - - ## Build inertial slope above energy - krange, slope, slopelabel = if D == 2 - [T(8), T(50)], -T(3), L"$\kappa^{-3}$" - elseif D == 3 - # [T(80), T(256)], -T(5 / 3), L"$\kappa^{-5/3}$" - [T(8), T(46)], -T(5 / 3), L"$\kappa^{-5/3}$" - end - slopeconst = maximum(specs[2].ehat ./ specs[2].κ .^ slope) - offset = D == 2 ? 3 : 0.7 - inertia = offset .* slopeconst .* krange .^ slope - - ## Nice ticks - kmax = maximum(specs[1].κ) - logmax = round(Int, log2(kmax + 1)) - xticks = T(2) .^ (0:logmax) - - ## Make plot - fig = Figure(; size = (500, 400)) - ax = Axis( - fig[1, 1]; - xticks, - xlabel = "κ", - xscale = log10, - yscale = log10, - limits = (1, kmax, T(1e-8), T(1)), - title = "Energy spectrum ($(D)D)", - ) - plotparts(i) = specs[i].κ, specs[i].ehat - nnles = length(case.nles) - FA = 3:2+nnles - VA = 3+nnles:2+2*nnles - - # lines!(ax, plotparts(1)...; color = Cycled(4), label = "DNS, t = 0") - lines!(ax, plotparts(2)...; color = Cycled(1), label = "DNS") - for i in FA - label = i == FA[1] ? "Filtered DNS (FA)" : nothing - lines!(ax, plotparts(i)...; color = Cycled(2), label) - end - for i in VA - label = i == VA[1] ? 
"Filtered DNS (VA)" : nothing - lines!(ax, plotparts(i)...; color = Cycled(3), label) - end - lines!(ax, krange, inertia; color = Cycled(1), label = slopelabel, linestyle = :dash) - axislegend( - ax; - position = :lb, - # position = (0.2, 0.01), - ) - autolimits!(ax) - if D == 2 - xlims!(ax, T(0.8), T(460)) - ylims!(ax, T(1e-10), T(1e0)) - elseif D == 3 - # xlims!(ax, 0.8, 290) - xlims!(ax, 0.8, 460) - ylims!(ax, T(1e-11), T(2e-1)) - end - - # Add resolution numbers just below plots - if D == 2 - text!(ax, "4096"; position = (175, 1.5e-10)) - textk, texte = 1.5, 2.0 - elseif D == 3 - # text!(ax, "1024"; position = (241, 2.4e-8)) - # text!(ax, "1024"; position = (259, 2.4e-5)) - # text!(ax, "1024"; position = (110, 1.5e-11)) - text!(ax, "1024"; position = (208, 1.3e-11)) - textk, texte = 1.50, 2.3 - end - for (i, nles) in zip(VA, case.nles) - κ, e = plotparts(i) - text!(ax, "$nles"; position = (κ[end] / textk, e[end] / texte)) - end - - # Plot zoom-in box - if D == 2 - o = 6 - sk, se = 1.06, 1.4 - x1, y1 = 477, 358 - x0, y0 = x1 - 90, y1 - 94 - elseif D == 3 - o = 7 - sk, se = 1.05, 1.3 - # x1, y1 = 360, 185 - x1, y1 = 477, 358 - x0, y0 = x1 - 90, y1 - 94 - end - kk, ee = plotparts(FA[end]) - kk, ee = kk[end-o], ee[end-o] - k0, k1 = kk / sk, kk * sk - e0, e1 = ee / se, ee * se - limits = (k0, k1, e0, e1) - lines!( - ax, - [ - Point2f(k0, e0), - Point2f(k1, e0), - Point2f(k1, e1), - Point2f(k0, e1), - Point2f(k0, e0), - ]; - color = :black, - linewidth = 1.5, - ) - ax2 = Axis( - fig; - bbox = BBox(x0, x1, y0, y1), - limits, - xscale = log10, - yscale = log10, - xticksvisible = false, - xticklabelsvisible = false, - xgridvisible = false, - yticksvisible = false, - yticklabelsvisible = false, - ygridvisible = false, - backgroundcolor = :white, - ) - # https://discourse.julialang.org/t/makie-inset-axes-and-their-drawing-order/60987/5 - translate!(ax2.scene, 0, 0, 10) - translate!(ax2.elements[:background], 0, 0, 9) - lines!(ax2, plotparts(2)...; color = Cycled(1)) - lines!(ax2, plotparts(FA[end])...; color = Cycled(2)) - lines!(ax2, plotparts(VA[end])...; color = Cycled(3)) - - save("$casedir/spectra.pdf", fig) - fig -end diff --git a/lib/PaperDC/src/PaperDC.jl b/lib/PaperDC/src/PaperDC.jl deleted file mode 100644 index 02dc8c24c..000000000 --- a/lib/PaperDC/src/PaperDC.jl +++ /dev/null @@ -1,88 +0,0 @@ -""" -Utility functions for scripts. -""" -module PaperDC - -using Accessors -using Adapt -using Dates -using DocStringExtensions -using EnumX -using LinearAlgebra -using IncompressibleNavierStokes -using IncompressibleNavierStokes: - momentum!, divergence!, project!, apply_bc_u!, kinetic_energy!, scalewithvolume! -using JLD2 -using LoggingExtras -using Lux -using MLUtils -using NeuralClosure -using Observables -using Optimisers -using Random - -"Write output to file, as the default SLURM file is not updated often enough." -function setsnelliuslogger(logfile) - filelogger = MinLevelLogger(FileLogger(logfile), Logging.Info) - logger = TeeLogger(ConsoleLogger(), filelogger) - oldlogger = global_logger(logger) - @info """ - Logging to file: $logfile - - Starting at $(Dates.now()). - - Last commit: - - $(cd(() -> read(`git log -n 1`, String), @__DIR__)) - """ - oldlogger -end - -# Inherit docstring templates -@template (MODULES, FUNCTIONS, METHODS, TYPES) = IncompressibleNavierStokes - -"Load JLD2 file as named tuple (instead of dict)." -function namedtupleload(file) - dict = load(file) - k, v = keys(dict), values(dict) - pairs = @. Symbol(k) => v - (; pairs...) 
-end - -""" -Make file name from parts. - -```@example -julia> splatfileparts("toto", 23; haha = 1e3, hehe = "hihi") -"toto_23_haha=1000.0_hehe=hihi" -``` -""" -function splatfileparts(args...; kwargs...) - sargs = string.(args) - skwargs = map((k, v) -> string(k) * "=" * string(v), keys(kwargs), values(kwargs)) - s = [sargs..., skwargs...] - join(s, "_") -end - -getsetup(; params, nles) = Setup(; - x = ntuple(α -> range(params.lims..., nles + 1), params.D), - params.Re, - params.backend, - params.bodyforce, - params.issteadybodyforce, -) - -include("observe.jl") -include("rk.jl") -include("train.jl") - -export setsnelliuslogger -export namedtupleload, splatfileparts -export observe_u, observe_v -export ProjectOrder, RKProject -export getdatafile, createdata, getsetup -export trainprior, loadprior -export trainpost, loadpost -export trainsmagorinsky, loadsmagorinsky - -end # module PaperDC diff --git a/lib/PaperDC/src/observe.jl b/lib/PaperDC/src/observe.jl deleted file mode 100644 index 05ba965a6..000000000 --- a/lib/PaperDC/src/observe.jl +++ /dev/null @@ -1,88 +0,0 @@ -function observe_v(dnsobs, Φ, les, compression, psolver) - (; grid) = les - (; dimension, N, Iu, Ip) = grid - D = dimension() - Mα = N[1] - 2 - v = vectorfield(les) - Pv = vectorfield(les) - p = scalarfield(les) - div = scalarfield(les) - ΦPF = vectorfield(les) - PFΦ = vectorfield(les) - c = vectorfield(les) - T = eltype(v) - results = (; - Φ, - Mα, - t = zeros(T, 0), - Dv = zeros(T, 0), - Pv = zeros(T, 0), - Pc = zeros(T, 0), - c = zeros(T, 0), - E = zeros(T, 0), - ) - on(dnsobs) do (; u, PF, t, E) - push!(results.t, t) - - Φ(v, u, les, compression) - apply_bc_u!(v, t, les) - Φ(ΦPF, PF, les, compression) - momentum!(PFΦ, v, nothing, t, les) - apply_bc_u!(PFΦ, t, les; dudt = true) - project!(PFΦ, les; psolver, p) - @. c = ΦPF - PFΦ - apply_bc_u!(c, t, les) - divergence!(div, v, les) - norm_Du = norm(div[Ip]) - norm_v = sqrt(sum(α -> sum(abs2, v[Iu[α], α]), 1:D)) - push!(results.Dv, norm_Du / norm_v) - - copyto!(Pv, v) - project!(Pv, les; psolver, p) - @. Pv = Pv - v - norm_vmPv = sqrt(sum(α -> sum(abs2, Pv[Iu[α], α]), 1:D)) - push!(results.Pv, norm_vmPv / norm_v) - - Pc = Pv - copyto!(Pc, c) - project!(Pc, les; psolver, p) - @. 
Pc = Pc - c - norm_cmPc = sqrt(sum(α -> sum(abs2, Pc[Iu[α], α]), 1:D)) - norm_c = sqrt(sum(α -> sum(abs2, c[Iu[α], α]), 1:D)) - push!(results.Pc, norm_cmPc / norm_c) - - norm_ΦPF = sqrt(sum(α -> sum(abs2, ΦPF[Iu[α], α]), 1:D)) - push!(results.c, norm_c / norm_ΦPF) - - kinetic_energy!(p, v, les) - scalewithvolume!(p, les) - Ev = sum(view(p, Ip)) - - push!(results.E, Ev / E) - end - results -end - -observe_u(dns, psolver_dns, filters; PF, p, nupdate = 1) = - processor() do state - # PF = zero.(state[].u) - # p = zero(state[].u[1]) - dnsobs = Observable((; state[].u, PF, state[].t, E = zero(eltype(p)))) - results = - map(f -> observe_v(dnsobs, f.Φ, f.setup, f.compression, f.psolver), filters) - on(state) do (; u, t, n) - n % nupdate == 0 || return - apply_bc_u!(u, t, dns) - momentum!(PF, u, nothing, t, dns) - apply_bc_u!(PF, t, dns; dudt = true) - project!(PF, dns; psolver = psolver_dns, p) - - kinetic_energy!(p, u, dns) - scalewithvolume!(p, dns) - E = sum(view(p, dns.grid.Ip)) - - dnsobs[] = (; u, PF, t, E) - end - # state[] = state[] # Save initial conditions - results - end diff --git a/lib/PaperDC/src/rk.jl b/lib/PaperDC/src/rk.jl deleted file mode 100644 index 675308e71..000000000 --- a/lib/PaperDC/src/rk.jl +++ /dev/null @@ -1,166 +0,0 @@ -"Encode projection order for use in [`RKProject`](@ref)." -@enumx ProjectOrder begin - "Project RHS before applying closure term." - First = 1 - - "Project solution instead of RHS (same as `rk`)." - Last = 2 - - "Project RHS after applying closure term." - Second = 3 -end - -""" - RKProject(rk, order) - -Runge-Kutta method with different projection order than the default Runge-Kutta method. -""" -struct RKProject{T,R} <: IncompressibleNavierStokes.AbstractODEMethod{T} - "Runge-Kutta method, for example `RKMethods.RK44()`." - rk::R - - "Projection order, see [`ProjectOrder`](@ref)." - projectorder::ProjectOrder.T - - RKProject(rk, projectorder) = new{eltype(rk.A),typeof(rk)}(rk, projectorder) -end - -IncompressibleNavierStokes.ode_method_cache(method::RKProject, setup) = - IncompressibleNavierStokes.ode_method_cache(method.rk, setup) - -IncompressibleNavierStokes.create_stepper( - method::RKProject; - setup, - psolver, - u, - temp, - t, - n = 0, -) = IncompressibleNavierStokes.create_stepper(method.rk; setup, psolver, u, temp, t, n) - -function IncompressibleNavierStokes.timestep!( - method::RKProject, - stepper, - Δt; - θ = nothing, - cache, -) - (; setup, psolver, u, temp, t, n) = stepper - (; closure_model) = setup - (; rk, projectorder) = method - (; A, b, c) = rk - (; ustart, ku, p) = cache - nstage = length(b) - m = closure_model - @assert projectorder ∈ instances(ProjectOrder.T) "Unknown projectorder: $projectorder" - - # Update current solution - tstart = t - copyto!(ustart, u) - - for i = 1:nstage - # Compute force at current stage i - apply_bc_u!(u, t, setup) - momentum!(ku[i], u, temp, t, setup) - - # Project F first - if projectorder == ProjectOrder.First - apply_bc_u!(ku[i], t, setup; dudt = true) - project!(ku[i], setup; psolver, p) - end - - # Add closure term - isnothing(m) || (ku[i] .+= m(u, θ)) - - # Project F second - if projectorder == ProjectOrder.Second - apply_bc_u!(ku[i], t, setup; dudt = true) - project!(ku[i], setup; psolver, p) - end - - # Intermediate time step - t = tstart + c[i] * Δt - - # Apply stage forces - u .= ustart - for j = 1:i - @. 
u += Δt * A[i, j] * ku[j] - end - - # Project stage u directly - # Make velocity divergence free at time t - if projectorder == ProjectOrder.Last - apply_bc_u!(u, t, setup) - project!(u, setup; psolver, p) - end - end - - # This is redundant, but Neumann BC need to have _exact_ copies - # since we divide by an infinitely thin (eps(T)) volume width in the - # diffusion term - apply_bc_u!(u, t, setup) - - IncompressibleNavierStokes.create_stepper(method; setup, psolver, u, temp, t, n = n + 1) -end - -function IncompressibleNavierStokes.timestep(method::RKProject, stepper, Δt; θ = nothing) - (; setup, psolver, u, temp, t, n) = stepper - (; closure_model) = setup - (; rk, projectorder) = method - (; A, b, c) = rk - nstage = length(b) - m = closure_model - @assert projectorder ∈ instances(ProjectOrder.T) "Unknown projectorder: $projectorder" - - # Update current solution (does not depend on previous step size) - tstart = t - ustart = u - ku = () - - for i = 1:nstage - # Compute force at current stage i - u = IncompressibleNavierStokes.apply_bc_u(u, t, setup) - F = IncompressibleNavierStokes.momentum(u, temp, t, setup) - - # Project F first - if projectorder == ProjectOrder.First - F = IncompressibleNavierStokes.apply_bc_u(F, t, setup; dudt = true) - F = IncompressibleNavierStokes.project(F, setup; psolver) - end - - # Add closure term - isnothing(m) || (F = F + m(u, θ)) - - # Project F second - if projectorder == ProjectOrder.Second - F = IncompressibleNavierStokes.apply_bc_u(F, t, setup; dudt = true) - F = IncompressibleNavierStokes.project(F, setup; psolver) - end - - # Store right-hand side of stage i - ku = (ku..., F) - - # Intermediate time step - t = tstart + c[i] * Δt - - # Apply stage forces - u = ustart - for j = 1:i - u = @. u + Δt * A[i, j] * ku[j] - end - - # Project stage u directly - # Make velocity divergence free at time t - if projectorder == ProjectOrder.Last - u = IncompressibleNavierStokes.apply_bc_u(u, t, setup) - u = IncompressibleNavierStokes.project(u, setup; psolver) - end - end - - # This is redundant, but Neumann BC need to have _exact_ copies - # since we divide by an infinitely thin (eps(T)) volume width in the - # diffusion term - u = IncompressibleNavierStokes.apply_bc_u(u, t, setup) - - IncompressibleNavierStokes.create_stepper(method; setup, psolver, u, temp, t, n = n + 1) -end diff --git a/lib/PaperDC/src/train.jl b/lib/PaperDC/src/train.jl deleted file mode 100644 index 9d46da222..000000000 --- a/lib/PaperDC/src/train.jl +++ /dev/null @@ -1,351 +0,0 @@ -getdatafile(outdir, nles, filter, seed) = - joinpath(outdir, "data", splatfileparts(; seed = repr(seed), filter, nles) * ".jld2") - -"Create data files." -createdata(; params, seeds, outdir, taskid) = - for (iseed, seed) in enumerate(seeds) - if isnothing(taskid) || iseed == taskid - @info "Creating DNS trajectory for seed $(repr(seed))" - else - # Each task does one initial condition - @info "Skipping seed $(repr(seed)) for task $taskid" - continue - end - filenames = map(Iterators.product(params.nles, params.filters)) do (nles, Φ) - f = getdatafile(outdir, nles, Φ, seed) - datadir = dirname(f) - ispath(datadir) || mkpath(datadir) - f - end - data = create_les_data(; params..., rng = Xoshiro(seed), filenames) - @info( - "Trajectory info:", - data[1].comptime / 60, - length(data[1].t), - Base.summarysize(data) * 1e-9, - ) - end - -getpriorfile(outdir, nles, filter) = - joinpath(outdir, "priortraining", splatfileparts(; filter, nles) * ".jld2") - -"Load a-priori training results from correct file names." 
-loadprior(outdir, nles, filters) = map( - splat((nles, Φ) -> load_object(getpriorfile(outdir, nles, Φ))), - Iterators.product(nles, filters), -) - -"Train with a-priori loss." -function trainprior(; - params, - priorseed, - dns_seeds_train, - dns_seeds_valid, - taskid, - outdir, - plotdir, - closure, - θ_start, - opt, - λ = nothing, - noiselevel = nothing, - scheduler = nothing, - nvalid, - batchsize, - displayref, - displayupdates, - nupdate_callback, - loadcheckpoint, - nepoch, - niter, -) - device(x) = adapt(params.backend, x) - itotal = 0 - for Φ in params.filters, nles in params.nles - itotal += 1 - if isnothing(taskid) || itotal == taskid - @info "Training a-priori" Φ nles - else - # Each task does one training - @info "Skipping a-priori training for iteration $(itotal) != $(taskid)" - continue - end - starttime = time() - priorfile = getpriorfile(outdir, nles, Φ) - priordir = dirname(priorfile) - ispath(priordir) || mkpath(priordir) - figdir = joinpath(plotdir, "priortraining") - ispath(figdir) || mkpath(figdir) - figfile = joinpath(figdir, splitext(basename(priorfile))[1] * ".pdf") - checkfile = join(splitext(priorfile), "_checkpoint") - batchseed, validseed = splitseed(priorseed, 2) # Same seed for all training setups - setup = getsetup(; params, nles) - data_train = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_train) - data_valid = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_valid) - io_train = create_io_arrays(data_train, setup) - io_valid = create_io_arrays(data_valid, setup) - # dataloader = create_dataloader_prior(io_train; batchsize, device) - θ = device(θ_start) - loss = create_loss_prior(closure) - optstate = Optimisers.setup(opt, θ) - it = rand(Xoshiro(validseed), 1:size(io_valid.u, params.D + 2), nvalid) - validset = device(map(v -> collect(selectdim(v, ndims(v), it)), io_valid)) - (; callbackstate, callback) = create_callback( - create_relerr_prior(closure, validset...); - θ, - displayref, - displayupdates, - figfile, - nupdate = nupdate_callback, - ) - if loadcheckpoint - # Resume from checkpoint - (; icheck, trainstate, callbackstate) = namedtupleload(checkfile) - @assert eltype(callbackstate.θmin) == Float32 "gpu_device() only works with Float32" - trainstate = trainstate |> gpu_device() - @reset callbackstate.θmin = callbackstate.θmin |> gpu_device() - else - icheck = 0 - trainstate = (; optstate, θ, rng = Xoshiro(batchseed)) - callbackstate = callback(callbackstate, trainstate) # Initial callback - end - for iepoch = icheck+1:nepoch - # (; trainstate, callbackstate) = train(; - # dataloader, - # loss, - # trainstate, - # scheduler, - # callbackstate, - # callback, - # niter, - # ) - dataloader = DataLoader( - (io_train.u, io_train.c); - batchsize, - trainstate.rng, - buffer = true, - shuffle = true, - parallel = false, - partial = false, - ) - (; trainstate, callbackstate) = trainepoch(; - dataloader, - loss, - trainstate, - callbackstate, - callback, - device, - noiselevel, - λ, - ) - if !isnothing(scheduler) - eta = scheduler(iepoch) - Optimisers.adjust!(trainstate.optstate, eta) - end - # Save all states to resume training later - # First move all arrays to CPU - c = callbackstate |> cpu_device() - t = trainstate |> cpu_device() - jldsave(checkfile; icheck = iepoch, callbackstate = c, trainstate = t) - end - θ = callbackstate.θmin # Use best θ instead of last θ - results = (; θ = Array(θ), comptime = time() - starttime, callbackstate.hist) - save_object(priorfile, results) - end - @info "Finished a-priori 
training." -end - -getpostfile(outdir, nles, filter, projectorder) = - joinpath(outdir, "posttraining", splatfileparts(; projectorder, filter, nles) * ".jld2") - -"Load a-posteriori training results from correct file names." -loadpost(outdir, nles, filters, projectorders) = map( - splat((nles, Φ, o) -> load_object(getpostfile(outdir, nles, Φ, o))), - Iterators.product(nles, filters, projectorders), -) - -"Train with a-posteriori loss function." -function trainpost(; - params, - projectorders, - outdir, - plotdir, - taskid, - postseed, - dns_seeds_train, - dns_seeds_valid, - nsubstep, - nunroll, - ntrajectory, - closure, - θ_start, - opt, - λ = nothing, - scheduler, - nunroll_valid, - nupdate_callback, - displayref, - displayupdates, - loadcheckpoint, - nepoch, - niter, -) - device(x) = adapt(params.backend, x) - itotal = 0 - for projectorder in projectorders, - (ifil, Φ) in enumerate(params.filters), - (igrid, nles) in enumerate(params.nles) - - itotal += 1 - if isnothing(taskid) || itotal == taskid - @info "Training a-posteriori" projectorder Φ nles - else - # Each task does one training - @info "Skipping a-posteriori training for iteration $(itotal) != $(taskid)" - continue - end - starttime = time() - postfile = getpostfile(outdir, nles, Φ, projectorder) - ispath(dirname(postfile)) || mkpath(dirname(postfile)) - figdir = joinpath(plotdir, "posttraining") - ispath(figdir) || mkpath(figdir) - figfile = joinpath(figdir, splitext(basename(postfile))[1] * ".pdf") - checkfile = join(splitext(postfile), "_checkpoint") - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - loss = create_loss_post(; - setup, - psolver, - method = RKProject(params.method, projectorder), - closure_model = wrappedclosure(closure, setup), - nsubstep, # Time steps per loss evaluation - ) - data_train = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_train) - data_valid = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_valid) - dataloader = create_dataloader_post( - map(d -> (; d.u, d.t), data_train); - device, - nunroll, - ntrajectory, - ) - θ = θ_start[igrid, ifil] |> device - optstate = Optimisers.setup(opt, θ) - (; callbackstate, callback) = let - d = data_valid[1] - it = 1:nunroll_valid - data = - (; u = selectdim(d.u, ndims(d.u), it) |> collect |> device, t = d.t[it]) - create_callback( - create_relerr_post(; - data, - setup, - psolver, - method = RKProject(params.method, projectorder), - closure_model = wrappedclosure(closure, setup), - nsubstep, - ); - θ, - figfile, - displayref, - displayupdates, - nupdate = nupdate_callback, - ) - end - if loadcheckpoint - @info "Resuming from checkpoint $checkfile" - (; icheck, trainstate, callbackstate) = namedtupleload(checkfile) - @assert eltype(callbackstate.θmin) == Float32 "gpu_device() only works with Float32" - trainstate = trainstate |> gpu_device() - @reset callbackstate.θmin = callbackstate.θmin |> gpu_device() - else - icheck = 0 - trainstate = (; optstate, θ, rng = Xoshiro(postseed)) - callbackstate = callback(callbackstate, trainstate) # Initial callback - end - for iepoch = icheck+1:nepoch - (; trainstate, callbackstate) = - train(; dataloader, loss, trainstate, niter, callbackstate, callback, λ) - if !isnothing(scheduler) - eta = scheduler(iepoch) - Optimisers.adjust!(trainstate.optstate, eta) - end - @info "Saving checkpoint to $(basename(checkfile))" - c = callbackstate |> cpu_device() - t = trainstate |> cpu_device() - jldsave(checkfile; icheck = iepoch, callbackstate = c, trainstate = t) - end 
- θ = callbackstate.θmin # Use best θ instead of last θ - results = (; θ = Array(θ), comptime = time() - starttime) - save_object(postfile, results) - end - @info "Finished a-posteriori training." -end - -getsmagfile(outdir, nles, filter, projectorder) = - joinpath(outdir, "smagorinsky", splatfileparts(; projectorder, filter, nles) * ".jld2") - -loadsmagorinsky(outdir, nles, filters, projectorders) = map( - splat((nles, Φ, o) -> load_object(getsmagfile(outdir, nles, Φ, o))), - Iterators.product(nles, filters, projectorders), -) - -function trainsmagorinsky(; - params, - projectorders, - outdir, - dns_seeds_train, - taskid, - nunroll, - nsubstep, - ninfo, - θrange, -) - device(x) = adapt(params.backend, x) - itotal = 0 - for projectorder in projectorders, Φ in params.filters, nles in params.nles - itotal += 1 - if isnothing(taskid) || itotal == taskid - @info "Training Smagorinsky" projectorder Φ nles - else - # Each task does one training - @info "Skipping Smagorinsky training for iteration $(itotal) != $(taskid)" - continue - end - starttime = time() - T = typeof(params.Re) - smagfile = getsmagfile(outdir, nles, Φ, projectorder) - smagdir = dirname(smagfile) - ispath(smagdir) || mkpath(smagdir) - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - d = namedtupleload(getdatafile(outdir, nles, Φ, dns_seeds_train[1])) - it = 1:nunroll - data = (; u = selectdim(d.u, ndims(d.u), it) |> collect |> device, t = d.t[it]) - θmin = T(0) - emin = T(Inf) - err = create_relerr_post(; - data, - setup, - psolver, - method = RKProject(params.method, projectorder), - closure_model = IncompressibleNavierStokes.smagorinsky_closure_natural(setup), - nsubstep, # Number of time steps between t[i] and t[i + 1] - ) - for (iθ, θ) in enumerate(θrange) - iθ % ninfo == 0 && @info "Testing θ = $θ" - e = err(θ) - if e < emin - emin = e - θmin = θ - end - end - @info "Optimal θ = $θmin" - results = (; θ = θmin, comptime = time() - starttime) - save_object(smagfile, results) - end - @info "Finished Smagorinsky training." -end diff --git a/lib/PaperDC/transferfunctions.jl b/lib/PaperDC/transferfunctions.jl deleted file mode 100644 index cb8bffd89..000000000 --- a/lib/PaperDC/transferfunctions.jl +++ /dev/null @@ -1,217 +0,0 @@ -using GLMakie -using CairoMakie - -GLMakie.activate!() - -palette = (; color = ["#3366cc", "#cc0000", "#669900", "#ffcc00"]) - -plotdir = joinpath(@__DIR__, "output", "transferfunctions") -ispath(plotdir) || mkpath(plotdir) - -G(k, Δ) = sinpi(k * Δ / 2) / (π * k * Δ / 2) - -n = 6 -# k = logrange(1, 2^n, n + 1) -k = LinRange(0, 2^n, 200) - -Δ = 1 / 2^(n + 1) -g = @. G(k, Δ) -g[1] = 1 # Override division by zero - -kx = reshape(k, :) -ky = reshape(k, 1, :) -kz = reshape(k, 1, 1, :) -kdiag = @. sqrt(2) * k -kdiagdiag = @. sqrt(3) * k -idiag = findfirst(kdiag .> k[end]) -idiagdiag = findfirst(kdiagdiag .> k[end]) -kdiag = kdiag[1:idiag] -kdiagdiag = kdiagdiag[1:idiagdiag] -gx = reshape(g, :) -gy = reshape(g, 1, :) -gz = reshape(g, 1, 1, :) - -VA2D = @. gx * gy -FAx2D = @. one(gx) * gy -FAy2D = @. gx * one(gy) -VA2D_diag = diag(VA2D)[1:idiag] -FAx2D_diag = map(i -> FAx2D[i, i], 1:idiag) -FAy2D_diag = map(i -> FAy2D[i, i], 1:idiag) -VA3D = @. gx * gy * gz -FAx3D = @. 
one(gx) * gy * gz -FAx3D_diag = map(i -> FAx3D[i, i, 1], 1:idiag) -FAx3D_diagdiag = map(i -> FAx3D[i, i, i], 1:idiagdiag) - -with_theme(; palette) do - fig = Figure(; size = (1300, 700)) - - Label(fig[0, 1:3], "Transfer functions"; valign = :bottom, font = :bold) - - ax = Axis3( - fig[1, 1]; - xlabel = "k₁", - ylabel = "k₂", - zlabel = "Damping", - azimuth = π / 4, - title = "G and G¹ (2D)", - ) - surface!(ax, k, k, VA2D) - surface!(ax, k, k, FAx2D; alpha = 0.5) - - ax = Axis3( - fig[1, 2]; - xlabel = "k₁", - ylabel = "k₂", - zlabel = "Damping", - azimuth = π / 4, - title = "G and G² (2D)", - ) - surface!(ax, k, k, VA2D) - surface!(ax, k, k, FAy2D; alpha = 0.5) - - isovalue = 0.95 - kwargs = (; algorithm = :iso, isorange = 0.002, isovalue) - lims = (0, 64) - ax = Axis3( - fig[1, 3]; - xlabel = "k₁", - ylabel = "k₂", - zlabel = "k₃", - azimuth = π / 4, - title = "Isocontours at $isovalue for G and G¹ (3D)", - aspect = :data, - # limits = (lims, lims, lims), - ) - volume!(ax, k, k, k, VA3D; kwargs...) - volume!(ax, k, k, k, FAx3D; kwargs..., alpha = 0.5) - - ax = Axis( - fig[2, 1]; - # xscale = log2, - # yscale = log2, - xlabel = "|| k ||", - title = "Sections of G and G¹ (2D)", - ) - lines!(ax, k, VA2D[1, :]; color = Cycled(2), label = "G") - lines!(ax, k, FAx2D[:, 1]; color = Cycled(1), linestyle = :solid, label = "G¹, k₂ = 0") - lines!( - ax, - kdiag, - FAx2D_diag; - color = Cycled(1), - linestyle = :dash, - label = "G¹, k₁ = k₂", - ) - lines!( - ax, - k, - FAx2D[1, :]; - color = Cycled(1), - linewidth = 3, - linestyle = :dot, - label = "G¹, k₁ = 0", - ) - axislegend(ax; position = :lb) - - ax = Axis( - fig[2, 2]; - # xscale = log2, - # yscale = log2, - xlabel = "|| k ||", - title = "Sections of G and G² (2D)", - ) - lines!(ax, k, VA2D[1, :]; color = Cycled(2), label = "G") - lines!(ax, k, FAy2D[1, :]; color = Cycled(1), linestyle = :solid, label = "G², k₁ = 0") - lines!( - ax, - kdiag, - FAy2D_diag; - color = Cycled(1), - linestyle = :dash, - label = "G², k₁ = k₂", - ) - lines!( - ax, - k, - FAy2D[:, 1]; - color = Cycled(1), - linewidth = 3, - linestyle = :dot, - label = "G², k₂ = 0", - ) - axislegend(ax; position = :lb) - - ax = Axis( - fig[2, 3]; - # xscale = log2, - # yscale = log2, - xlabel = "|| k ||", - title = "Sections of G and G¹ (3D)", - ) - lines!(ax, k, VA3D[:, 1, 1]; color = Cycled(2), label = "G") - lines!( - ax, - k, - FAx3D[:, 1, 1]; - color = Cycled(1), - linestyle = :solid, - label = "G¹, k₂ = k₃ = 0", - ) - lines!( - ax, - kdiag, - FAx3D_diag; - color = Cycled(1), - linestyle = :dash, - label = "G¹, k₁ = k₂, k₃ = 0", - ) - lines!( - ax, - kdiagdiag, - FAx3D_diagdiag; - color = Cycled(1), - linestyle = :dashdotdot, - label = "G¹, k₁ = k₂ = k₃", - ) - lines!( - ax, - k, - FAx3D[1, :, 1]; - color = Cycled(1), - linewidth = 3, - linestyle = :dot, - label = "G¹, k₁ = 0", - ) - axislegend(ax; position = :lb) - - save("$plotdir/transferfunctions.png", fig; px_per_unit = 1.5) - - fig -end - -# save("$plotdir/transferfunctions.png", current_figure(); px_per_unit = 1.5) - -GLMakie.activate!() - -let - isovalue = 0.95 - kwargs = (; algorithm = :iso, isorange = 0.002, isovalue) - fig = Figure() - lims = (0, 64) - ax = Axis3( - fig[1, 1]; - xlabel = "kx", - ylabel = "ky", - zlabel = "kz", - azimuth = π / 4, - title = "Isocontours at $isovalue for VA and FAx (3D)", - aspect = :data, - limits = (lims, lims, lims), - ) - # xlims!(0, 64) - # ylims!(0, 64) - # zlims!(0, 64) - volume!(ax, k, k, k, VA3D; kwargs...) 
- volume!(ax, k, k, k, FAx3D; kwargs..., alpha = 0.5) - fig -end From 05fc22580516bd0543a417235f47e0e216d0eae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:01:28 +0100 Subject: [PATCH 2/6] refactor: Excise SymmetryClosure from repo --- lib/SymmetryClosure/Project.toml | 59 --- lib/SymmetryClosure/README.md | 3 - lib/SymmetryClosure/generate_data.jl | 90 ---- lib/SymmetryClosure/src/SymmetryClosure.jl | 36 -- lib/SymmetryClosure/src/cases.jl | 27 -- lib/SymmetryClosure/src/setup.jl | 61 --- lib/SymmetryClosure/src/tensorclosure.jl | 115 ----- lib/SymmetryClosure/src/train.jl | 113 ----- lib/SymmetryClosure/symmetryanalysis.jl | 479 --------------------- lib/SymmetryClosure/test_dns.jl | 153 ------- lib/SymmetryClosure/test_equivariance.jl | 39 -- lib/SymmetryClosure/test_tensor.jl | 161 ------- lib/SymmetryClosure/test_zygote.jl | 38 -- lib/SymmetryClosure/train_models.jl | 122 ------ 14 files changed, 1496 deletions(-) delete mode 100644 lib/SymmetryClosure/Project.toml delete mode 100644 lib/SymmetryClosure/README.md delete mode 100644 lib/SymmetryClosure/generate_data.jl delete mode 100644 lib/SymmetryClosure/src/SymmetryClosure.jl delete mode 100644 lib/SymmetryClosure/src/cases.jl delete mode 100644 lib/SymmetryClosure/src/setup.jl delete mode 100644 lib/SymmetryClosure/src/tensorclosure.jl delete mode 100644 lib/SymmetryClosure/src/train.jl delete mode 100644 lib/SymmetryClosure/symmetryanalysis.jl delete mode 100644 lib/SymmetryClosure/test_dns.jl delete mode 100644 lib/SymmetryClosure/test_equivariance.jl delete mode 100644 lib/SymmetryClosure/test_tensor.jl delete mode 100644 lib/SymmetryClosure/test_zygote.jl delete mode 100644 lib/SymmetryClosure/train_models.jl diff --git a/lib/SymmetryClosure/Project.toml b/lib/SymmetryClosure/Project.toml deleted file mode 100644 index 969bb500c..000000000 --- a/lib/SymmetryClosure/Project.toml +++ /dev/null @@ -1,59 +0,0 @@ -name = "SymmetryClosure" -uuid = "1c19b3a0-f99b-4d07-bf81-942cc75cab0f" -authors = ["Syver Døving Agdestein "] -version = "1.0.0" - -[deps] -Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" -ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" -Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" -FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" -IncompressibleNavierStokes = "5e318141-6589-402b-868d-77d7df8c442e" -JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -Lux = "b2108857-7c20-44ae-9111-449ecde12c47" -LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda" -NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -NeuralClosure = "099dac27-d7f2-4047-93d5-0baee36b9c25" -Observables = "510215fc-4207-5dde-b226-833fc4488ee2" -Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" -ParameterSchedulers = "d7d3b36b-41b8-4d0d-a2bf-768c6151755e" -Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -WGLMakie = "276b4fcb-3e11-5398-bf8b-a0c2d153d008" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" - -[sources] -IncompressibleNavierStokes = {path = "../.."} -NeuralClosure = {path = "../NeuralClosure"} - -[compat] -Accessors = "0.1" -Adapt = "4" -CUDA = "5" -CairoMakie = "0.12" 
-ChainRulesCore = "1" -ComponentArrays = "0.15" -Dates = "1" -FFTW = "1" -IncompressibleNavierStokes = "2" -JLD2 = "0.5" -KernelAbstractions = "0.9" -LinearAlgebra = "1" -Lux = "1" -LuxCUDA = "0.3" -NNlib = "0.9" -NeuralClosure = "1" -Observables = "0.5" -Optimisers = "0.3, 0.4" -ParameterSchedulers = "0.4" -StaticArrays = "1" -WGLMakie = "0.10" -Zygote = "0.6, 0.7" -julia = "1.9" diff --git a/lib/SymmetryClosure/README.md b/lib/SymmetryClosure/README.md deleted file mode 100644 index ccbd485d3..000000000 --- a/lib/SymmetryClosure/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# SymmetryClosure - -Symmetry closure scripts. diff --git a/lib/SymmetryClosure/generate_data.jl b/lib/SymmetryClosure/generate_data.jl deleted file mode 100644 index 8e01414c9..000000000 --- a/lib/SymmetryClosure/generate_data.jl +++ /dev/null @@ -1,90 +0,0 @@ -# LSP hack (to get "go to definition" etc. working) #src -if false #src - include("src/SymmetryClosure.jl") #src - include("../NeuralClosure/src/NeuralClosure.jl") #src - include("../../src/IncompressibleNavierStokes.jl") #src - using .SymmetryClosure #src - using .NeuralClosure #src - using .IncompressibleNavierStokes #src -end #src - -########################################################################## #src - -# # Data generation -# -# This script is used to generate filtered DNS data. - -@info "# DNS data generation" - -########################################################################## #src - -# ## Setup - -using SymmetryClosure -using Random -using NeuralClosure -using JLD2 - -# Get SLURM specific variables (if any) -(; jobid, taskid) = slurm_vars() - -# Log info about current run -time_info() - -# Hardware selection -(; backend, device, clean) = hardware() - -# Test case -(; params, outdir, plotdir, seed_dns, ntrajectory) = testcase(backend) - -# DNS seeds -dns_seeds = splitseed(seed_dns, ntrajectory) - -########################################################################## #src - -# ## Create data - -for (iseed, seed) in enumerate(dns_seeds) - if isnothing(taskid) || iseed == taskid - @info "Creating DNS trajectory for seed $(repr(seed))" - else - # Each task does one initial condition - @info "Skipping seed $(repr(seed)) for task $taskid" - continue - end - filenames = map(Iterators.product(params.nles, params.filters)) do (nles, Φ) - f = getdatafile(outdir, nles, Φ, seed) - datadir = dirname(f) - ispath(datadir) || mkpath(datadir) - f - end - data = create_les_data(; params..., backend, rng = Xoshiro(seed), filenames) - @info( - "Trajectory info:", - data[1].comptime / 60, - length(data[1].t), - Base.summarysize(data) * 1e-9, - ) -end - -########################################################################## #src - -# Computational time - -docomp = true -docomp && let - comptime, datasize = 0.0, 0.0 - for seed in dns_seeds - comptime += load( - getdatafile(outdir, params.nles[1], params.filters[1], seed), - "comptime", - ) - end - for seed in dns_seeds, nles in params.nles, Φ in params.filters - data = namedtupleload(getdatafile(outdir, nles, Φ, seed)) - datasize += Base.summarysize(data) - end - @info "Data" comptime - @info "Data" comptime / 60 datasize * 1e-9 - clean() -end diff --git a/lib/SymmetryClosure/src/SymmetryClosure.jl b/lib/SymmetryClosure/src/SymmetryClosure.jl deleted file mode 100644 index adf10ef89..000000000 --- a/lib/SymmetryClosure/src/SymmetryClosure.jl +++ /dev/null @@ -1,36 +0,0 @@ -module SymmetryClosure - -using Accessors -using Adapt -using ChainRulesCore -using CUDA -using Dates -using 
IncompressibleNavierStokes -using JLD2 -using KernelAbstractions -using LinearAlgebra -using Lux -using NeuralClosure -using NNlib -using Optimisers -using Random -using StaticArrays - -include("tensorclosure.jl") -include("setup.jl") -include("cases.jl") -include("train.jl") - -export tensorclosure, polynomial, create_cnn -export slurm_vars, - time_info, - hardware, - splatfileparts, - getdatafile, - namedtupleload, - splitseed, - getsetup, - testcase -export trainpost - -end diff --git a/lib/SymmetryClosure/src/cases.jl b/lib/SymmetryClosure/src/cases.jl deleted file mode 100644 index 6ffe1e243..000000000 --- a/lib/SymmetryClosure/src/cases.jl +++ /dev/null @@ -1,27 +0,0 @@ -function testcase(backend) - basedir = haskey(ENV, "DEEPDIP") ? ENV["DEEPDIP"] : joinpath(@__DIR__, "..") - outdir = mkpath(joinpath(basedir, "output", "Kolmogorov2D")) - plotdir = mkpath(joinpath(outdir, "plots")) - seed_dns = 123 - ntrajectory = 8 - T = Float32 - params = (; - D = 2, - lims = (T(0), T(1)), - Re = T(6e3), - tburn = T(0.5), - tsim = T(3), - savefreq = 50, - ndns = 2048, - nles = [32, 64, 128], - filters = [FaceAverage()], - backend, - icfunc = (setup, psolver, rng) -> - random_field(setup, T(0); kp = 20, psolver, rng), - method = LMWray3(; T), - bodyforce = (dim, x, y, t) -> (dim == 1) * 5 * sinpi(8 * y), - issteadybodyforce = true, - processors = (; log = timelogger(; nupdate = 100)), - ) - (; outdir, plotdir, seed_dns, ntrajectory, params) -end diff --git a/lib/SymmetryClosure/src/setup.jl b/lib/SymmetryClosure/src/setup.jl deleted file mode 100644 index b8a15dbe7..000000000 --- a/lib/SymmetryClosure/src/setup.jl +++ /dev/null @@ -1,61 +0,0 @@ -# Some script utils - -function slurm_vars() - jobid = haskey(ENV, "SLURM_JOB_ID") ? parse(Int, ENV["SLURM_JOB_ID"]) : nothing - taskid = - haskey(ENV, "SLURM_ARRAY_TASK_ID") ? parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) : - nothing - isnothing(jobid) || @info "Running on SLURM" jobid taskid - (; jobid, taskid) -end - -function time_info() - @info "Starting at $(Dates.now())" - @info """ - Last commit: - - $(cd(() -> read(`git log -n 1`, String), @__DIR__)) - """ -end - -hardware() = - if CUDA.functional() - @info "Running on CUDA" - CUDA.allowscalar(false) - backend = CUDABackend() - device = x -> adapt(backend, x) - clean = () -> (GC.gc(); CUDA.reclaim()) - (; backend, device, clean) - else - @warn """ - Running on CPU. - Consider reducing the size of DNS, LES, and CNN layers if - you want to test run on a laptop. - """ - (; backend = CPU(), device = identity, clean = () -> nothing) - end - -function splatfileparts(args...; kwargs...) - sargs = string.(args) - skwargs = map((k, v) -> string(k) * "=" * string(v), keys(kwargs), values(kwargs)) - s = [sargs..., skwargs...] - join(s, "_") -end - -getdatafile(outdir, nles, filter, seed) = - joinpath(outdir, "data", splatfileparts(; seed = repr(seed), filter, nles) * ".jld2") - -function namedtupleload(file) - dict = load(file) - k, v = keys(dict), values(dict) - pairs = @. Symbol(k) => v - (; pairs...) 
-end - -getsetup(; params, nles) = Setup(; - x = ntuple(α -> range(params.lims..., nles + 1), params.D), - params.Re, - params.backend, - params.bodyforce, - params.issteadybodyforce, -) diff --git a/lib/SymmetryClosure/src/tensorclosure.jl b/lib/SymmetryClosure/src/tensorclosure.jl deleted file mode 100644 index 35d64848f..000000000 --- a/lib/SymmetryClosure/src/tensorclosure.jl +++ /dev/null @@ -1,115 +0,0 @@ -function tensorclosure(u, θ, model, setup) - (; Δ) = setup.grid - @assert all(Δi -> all(≈(Δi[1]), Δi), Array.(Δ)) - Δx = Array(Δ[1])[1] - B, V = tensorbasis(u, setup) - x = model(V, θ) - x = x .* (Δx^2) - # τ = sum(x .* B; dims = ndims(B)) - τ = IncompressibleNavierStokes.lastdimcontract(x, B, setup) - τ = apply_bc_p(τ, zero(eltype(u)), setup) - IncompressibleNavierStokes.divoftensor(τ, setup) -end - -tensorclosure(model, setup) = (u, θ) -> tensorclosure(u, θ, model, setup) - -function polynomial(V, θ) - s..., nV = size(V) - V = eachslice(V; dims = ndims(V)) - basis = if nV == 2 - cat(V[1], V[2], V[1] .* V[2], V[1] .* V[1], V[2] .* V[2]; dims = length(s) + 1) - elseif nV == 5 - cat( - V[1], - V[2], - V[3], - V[4], - V[5], - V[1] .* V[1], - V[1] .* V[2], - V[1] .* V[3], - V[1] .* V[4], - V[1] .* V[5], - V[2] .* V[2], - V[2] .* V[3], - V[2] .* V[4], - V[2] .* V[5], - V[3] .* V[3], - V[3] .* V[4], - V[3] .* V[5], - V[4] .* V[4], - V[4] .* V[5], - V[5] .* V[5]; - dims = length(s) + 1, - ) - end - θ = reshape(θ, ntuple(Returns(1), length(s))..., size(θ)...) - x = sum(θ .* basis; dims = ndims(basis)) - reshape(x, s..., size(x, ndims(x))) -end - -function create_cnn(; setup, radii, channels, activations, use_bias, rng) - r, c, σ, b = radii, channels, activations, use_bias - (; grid) = setup - (; dimension, x) = grid - D = dimension() - T = eltype(x[1]) - - # dx = map(d -> d[2:end-1], Δu) - - # Weight initializer - glorot_uniform_T(rng::AbstractRNG, dims...) = glorot_uniform(rng, T, dims...) 
- - # Correct output channels - if D == 2 - @assert c[end] == 3 # 3 basis tensors - elseif D == 3 - @assert c[end] == 18 # 18 basis tensors - end - - cin = if D == 2 - 2 # 2 invariants - elseif D == 3 - 5 # 5 invariants - end - - # Add input channel size - c = [cin; c] - - # Add padding so that output has same shape as input - # padder = ntuple(α -> (u -> pad_circular(u, sum(r); dims = α)), D) - padder = u -> pad_circular(u, sum(r)) - - # Some convolutional layers - convs = map( - i -> Conv( - ntuple(α -> 2r[i] + 1, D), - c[i] => c[i+1], - σ[i]; - use_bias = b[i], - init_weight = glorot_uniform_T, - ), - eachindex(r), - ) - - # Create convolutional closure model - m, θ_start = NeuralClosure.create_closure(padder, convs...; rng) - - inside = setup.grid.Ip - - function cnn_coeffs(V, θ) - s..., nchan = size(V) - # Remove boundaries - if D == 2 - V = V[2:end-1, 2:end-1, :] - else - V = V[2:end-1, 2:end-1, 2:end-1, :] - end - V = reshape(V, size(V)..., 1) # Add sample dim - coeffs = m(V, θ) - coeffs = pad_circular(coeffs, 1) # Add boundaries - coeffs = reshape(coeffs, s..., :) # Remove sample dim - end - - cnn_coeffs, θ_start -end diff --git a/lib/SymmetryClosure/src/train.jl b/lib/SymmetryClosure/src/train.jl deleted file mode 100644 index 0e2dc753e..000000000 --- a/lib/SymmetryClosure/src/train.jl +++ /dev/null @@ -1,113 +0,0 @@ -function trainpost(; - params, - outdir, - plotdir, - taskid, - postseed, - dns_seeds_train, - dns_seeds_valid, - nsubstep, - nunroll, - ntrajectory, - closure_models, - θ_start, - opt, - λ = nothing, - scheduler, - nunroll_valid, - nupdate_callback, - displayref, - displayupdates, - loadcheckpoint, - nepoch, - niter, -) - device = x -> adapt(params.backend, x) - Φ = params.filters[1] - for (igrid, nles) in enumerate(params.nles) - igrid == 1 || continue - if isnothing(taskid) || igrid == taskid - @info "Training a-posteriori" nles - else - # Each task does one training - @info "Skipping a-posteriori training for iteration $(igrid) != $(taskid)" - continue - end - starttime = time() - closure_model = closure_models[igrid] - file = joinpath(outdir, "training", splatfileparts(; nles) * ".jld2") - ispath(dirname(file)) || mkpath(dirname(file)) - figdir = mkpath(joinpath(outdir, "training")) - figfile = joinpath(figdir, splitext(basename(file))[1] * ".pdf") - checkfile = join(splitext(file), "_checkpoint") - setup = getsetup(; params, nles) - psolver = default_psolver(setup) - loss = create_loss_post(; - setup, - psolver, - params.method, - closure_model, - nsubstep, # Time steps per loss evaluation - ) - data_train = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_train) - data_valid = - map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_valid) - dataloader = create_dataloader_post( - map(d -> (; d.u, d.t), data_train); - device, - nunroll, - ntrajectory, - ) - θ = θ_start |> device - optstate = Optimisers.setup(opt, θ) - (; callbackstate, callback) = let - d = data_valid[1] - it = 1:nunroll_valid - data = - (; u = selectdim(d.u, ndims(d.u), it) |> collect |> device, t = d.t[it]) - create_callback( - create_relerr_post(; - data, - setup, - psolver, - params.method, - closure_model, - nsubstep, - ); - θ, - figfile, - displayref, - displayupdates, - nupdate = nupdate_callback, - ) - end - if loadcheckpoint - @info "Resuming from checkpoint $checkfile" - (; icheck, trainstate, callbackstate) = namedtupleload(checkfile) - @assert eltype(callbackstate.θmin) == Float32 "gpu_device() only works with Float32" - trainstate = trainstate |> 
gpu_device() - @reset callbackstate.θmin = callbackstate.θmin |> gpu_device() - else - icheck = 0 - trainstate = (; optstate, θ, rng = Xoshiro(postseed)) - callbackstate = callback(callbackstate, trainstate) # Initial callback - end - for iepoch = icheck+1:nepoch - (; trainstate, callbackstate) = - train(; dataloader, loss, trainstate, niter, callbackstate, callback, λ) - if !isnothing(scheduler) - eta = scheduler(iepoch) - Optimisers.adjust!(trainstate.optstate, eta) - end - @info "Saving checkpoint to $(basename(checkfile))" - c = callbackstate |> cpu_device() - t = trainstate |> cpu_device() - jldsave(checkfile; icheck = iepoch, callbackstate = c, trainstate = t) - end - θ = callbackstate.θmin # Use best θ instead of last θ - results = (; θ = Array(θ), comptime = time() - starttime) - save_object(file, results) - end - @info "Finished a-posteriori training." -end diff --git a/lib/SymmetryClosure/symmetryanalysis.jl b/lib/SymmetryClosure/symmetryanalysis.jl deleted file mode 100644 index a362372b1..000000000 --- a/lib/SymmetryClosure/symmetryanalysis.jl +++ /dev/null @@ -1,479 +0,0 @@ -# LSP hack #src -if false #src - include("src/SymmetryClosure.jl") #src - include("../NeuralClosure/src/NeuralClosure.jl") #src - include("../../src/IncompressibleNavierStokes.jl") #src - using .SymmetryClosure #src - using .NeuralClosure #src - using .IncompressibleNavierStokes #src -end #src - -########################################################################## #src - -# # Symmetry-preserving closure models -# -# - Generate filtered DNS data -# - Define CNN closure models -# - Train all closure models in the same way -# - Compare errors and symmetry errors -# -# The filtered DNS data is saved and can be loaded in a subesequent session. -# The learned CNN parameters are also saved. - -# ## Load packages - -using Adapt -## using GLMakie -using CairoMakie -using IncompressibleNavierStokes -using JLD2 -using LinearAlgebra -using NeuralClosure -using NNlib -using Optimisers -using Random -using SymmetryClosure - -# Choose where to put output -## outdir = joinpath(@__DIR__, "output") -outdir = joinpath(@__DIR__, "output", "nobias") -plotdir = joinpath(outdir, "plots") -datadir = joinpath(outdir, "data") -ispath(plotdir) || mkpath(plotdir) -ispath(datadir) || mkpath(datadir) - -########################################################################## #src - -# ## Define random number generator seeds -# -# Use a new RNG with deterministic seed for each code "section" -# so that e.g. training batch selection does not depend on whether we -# generated fresh filtered DNS data or loaded existing one (the -# generation of which would change the state of a global RNG). -# -# Note: Using `rng = Random.default_rng()` twice seems to point to the -# same RNG, and mutating one also mutates the other. -# `rng = Xoshiro()` creates an independent copy each time. -# -# We define all the seeds here. - -seeds = (; - dns = 123, # Initial conditions - θ₀ = 234, # Initial CNN parameters - training = 345, # Training batch selection -) - -########################################################################## #src - -# ## Hardware selection - -# For running on CPU. -# Consider reducing the sizes of DNS, LES, and CNN layers if -# you want to test run on a laptop. 
-T = Float32 -backend = CPU() -device = identity -clean() = nothing - -# For running on a CUDA compatible GPU -using LuxCUDA -using CUDA; -T = Float32; -backend = CUDABackend(); -CUDA.allowscalar(false); -device = x -> adapt(CuArray, x) -clean() = (GC.gc(); CUDA.reclaim()) - -########################################################################## #src - -# ## Data generation -# -# Create filtered DNS data for training, validation, and testing. - -# Random number generator for initial conditions. -# Important: Created and seeded first, then shared for all initial conditions. -# After each initial condition generation, it is mutated and creates different -# IC for the next iteration. -rng = Xoshiro(seeds.dns) - -# Parameters -get_params(nlesscalar) = (; - D = 2, - Re = T(10_000), - tburn = T(0.05), - tsim = T(0.5), - Δt = T(5e-5), - nles = map(n -> (n, n), nlesscalar), # LES resolutions - ndns = (n -> (n, n))(4096), # DNS resolution - ## ndns = (n -> (n, n))(1024), # DNS resolution - filters = (FaceAverage(),), - backend, - create_psolver = psolver_spectral, - icfunc = (setup, psolver, rng) -> - random_field(setup, zero(eltype(setup.grid.x[1])); kp = 20, psolver, rng), - rng, -) - -# Get parameters for multiple LES resolutions -nles = [64, 128, 256] -params_train = (; get_params(nles)..., tsim = T(0.5), savefreq = 10); -params_valid = (; get_params(nles)..., tsim = T(0.1), savefreq = 40); -params_test = (; get_params(nles)..., tsim = T(0.1), savefreq = 10); - -create_data = false -if create_data - ## Create filtered DNS data - data_train = [create_les_data(; params_train...) for _ = 1:5] - data_valid = [create_les_data(; params_valid...) for _ = 1:1] - data_test = [create_les_data(; params_test...) for _ = 1:1] - - ## Save filtered DNS data - jldsave("$datadir/data_train.jld2"; data_train) - jldsave("$datadir/data_valid.jld2"; data_valid) - jldsave("$datadir/data_test.jld2"; data_test) -end - -# Load filtered DNS data -data_train = load("$datadir/data_train.jld2", "data_train"); -data_valid = load("$datadir/data_valid.jld2", "data_valid"); -data_test = load("$datadir/data_test.jld2", "data_test"); - -# Computational time -data_train[1].comptime -data_valid[1].comptime -data_test[1].comptime -map(d -> d.comptime, data_train) -sum(d -> d.comptime, data_train) / 60 -data_test[1].comptime / 60 -sum(dd -> sum(d -> d.comptime, dd), (data_train, data_valid, data_test)) - -# Build LES setups and assemble operators -getsetups(params) = - map(params.nles) do nles - Setup(; - x = ntuple(α -> LinRange(T(0), T(1), nles[α] + 1), params.D), - params.Re, - params.backend, - ) - end - -setups_train = getsetups(params_train); -setups_valid = getsetups(params_valid); -setups_test = getsetups(params_test); - -# Example data inspection -data_train[1].t -data_train[1].data |> size -data_train[1].data[1, 1].u[end][1] - -# Create input/output arrays for a-priori training (ubar vs c) -io_train = create_io_arrays(data_train, setups_train); -io_valid = create_io_arrays(data_valid, setups_valid); -io_test = create_io_arrays(data_test, setups_test); - -# Check that data is reasonably bounded -io_train[1].u |> extrema -io_train[1].c |> extrema -io_valid[1].u |> extrema -io_valid[1].c |> extrema -io_test[1].u |> extrema -io_test[1].c |> extrema - -# Inspect data (live animation with GLMakie) -## GLMakie.activate!() -let - ig = 2 - field, setup = data_train[1].data[ig].u, setups_train[ig] - ## field, setup = data_valid[1].data[ig].u, setups_valid[ig]; - ## field, setup = data_test[.data[ig].u, setups_test[ig]; - u = 
device.(field[1]) - o = Observable((; u, temp = nothing, t = nothing)) - ## energy_spectrum_plot(o; setup) |> display - fig = fieldplot( - o; - setup, - ## fieldname = :velocitynorm, - ## fieldname = 1, - ) - fig |> display - for i in eachindex(field) - i % 50 == 0 || continue - o[] = (; o[]..., u = device(field[i])) - fig |> display - sleep(0.1) - end -end - -########################################################################## #src - -# ## Define CNN closure models -# -# Define different closure models. -# Use the same random number generator for all initial parameters. - -# Regular CNN -m_cnn = let - rng = Xoshiro(seeds.θ₀) - name = "cnn" - closure, θ₀ = cnn(; - setup = setups_train[1], - radii = [2, 2, 2, 2, 2], - channels = [24, 24, 24, 24, params_train.D], - activations = [tanh, tanh, tanh, tanh, identity], - ## use_bias = [true, true, true, true, false], - use_bias = fill(false, 5), - rng, - ) - (; closure, θ₀, name) -end; -m_cnn.closure.chain - -# Group CNN A: Same number of channels as regular CNN -m_gcnn_a = let - rng = Xoshiro(seeds.θ₀) - name = "gcnn_a" - closure, θ₀ = gcnn(; - setup = setups_train[1], - radii = [2, 2, 2, 2, 2], - channels = [6, 6, 6, 6, 1], - activations = [tanh, tanh, tanh, tanh, identity], - ## use_bias = [true, true, true, true, false], - use_bias = fill(false, 5), - rng, - ) - (; closure, θ₀, name) -end; -m_gcnn_a.closure.chain - -# Group CNN B: Same number of parameters as regular CNN -m_gcnn_b = let - rng = Xoshiro(seeds.θ₀) - name = "gcnn_b" - closure, θ₀ = gcnn(; - setup = setups_train[1], - radii = [2, 2, 2, 2, 2], - channels = [12, 12, 12, 12, 1], - activations = [tanh, tanh, tanh, tanh, identity], - ## use_bias = [true, true, true, true, false], - use_bias = fill(false, 5), - rng, - ) - (; closure, θ₀, name) -end; -m_gcnn_b.closure.chain - -# Store models and initial parameters -models = m_cnn, m_gcnn_a, m_gcnn_b; - -# Give the CNNs a test run -# Note: Data and parameters are stored on the CPU, and -# must be moved to the GPU before running (`device`) -models[1].closure(device(io_train[1].u[:, :, :, 1:50]), device(models[1].θ₀)); -models[2].closure(device(io_train[1].u[:, :, :, 1:50]), device(models[2].θ₀)); -models[3].closure(device(io_train[1].u[:, :, :, 1:50]), device(models[3].θ₀)); - -########################################################################## #src - -# ## A-priori training -# -# Train the models using an a-priori loss function. -# Use the same batch selection random number seed for each model. -# Save parameters to disk after each run. -# Plot training progress (for a validation data batch). 
- -# Parameter save files -priorfiles = broadcast(eachindex(nles), eachindex(models)') do ig, im - m = models[im] - "$datadir/prior_$(m.name)_igrid$ig.jld2" -end - -# Train -dotrain = false -dotrain && let - for (im, m) in enumerate(models), ig = 1:length(nles) - @info "Training for $(m.name), grid $ig" - clean() - plotfile = "$plotdir/training_prior_$(m.name)_igrid$ig.pdf" - rng = Xoshiro(seeds.training) - starttime = time() - dataloader = create_dataloader_prior(io_train[ig]; batchsize = 100, device) - θ = device(m.θ₀) - loss = create_loss_prior(mean_squared_error, m.closure) - opt = Adam(T(1.0e-3)) - optstate = Optimisers.setup(opt, θ) - it = rand(rng, 1:size(io_valid[ig].u, 4), 50) - validset = device(map(v -> v[:, :, :, it], io_valid[ig])) - (; callbackstate, callback) = create_callback( - create_relerr_prior(m.closure, validset...); - θ, - displayref = true, - displayupdates = true, # Set to `true` if using CairoMakie - nupdate = 20, - ) - (; trainstate, callbackstate) = train(; - dataloader, - loss, - trainstate = (; optstate, θ, rng), - niter = 10_000, - callbackstate, - callback, - ) - θ = callbackstate.θmin # Use best θ instead of last θ - prior = (; θ = Array(θ), comptime = time() - starttime, callbackstate.hist) - jldsave(priorfiles[ig, im]; prior) - save(plotfile, current_figure()) - end - clean() -end - -# Load learned parameters and training times -prior = load.(priorfiles, "prior"); -θ_cnn_prior = broadcast(eachindex(nles), eachindex(models)') do ig, im - m = models[im] - p = prior[ig, im] - copyto!(device(m.θ₀), p.θ) -end; - -# Check that parameters are within reasonable bounds -using Statistics -CUDA.@allowscalar θ_cnn_prior[1] |> std -CUDA.@allowscalar θ_cnn_prior[2] |> std -CUDA.@allowscalar θ_cnn_prior[3] |> std -θ_cnn_prior[1] |> extrema -θ_cnn_prior[2] |> extrema -θ_cnn_prior[3] |> extrema - -# Training times -map(p -> p.comptime, prior) -map(p -> p.comptime, prior) |> vec -map(p -> p.comptime, prior) |> sum # Seconds -map(p -> p.comptime, prior) |> sum |> x -> x / 60 # Minutes -map(p -> p.comptime, prior) |> sum |> x -> x / 3600 # Hours - -########################################################################## #src - -# ## Error analysis - -# ### Compute a-priori errors -# -# Note that it is still interesting to compute the a-priori errors for the -# a-posteriori trained CNN. - -e_prior = let - e = zeros(T, length(nles), length(models)) - for (im, m) in enumerate(models), ig = 1:length(nles) - @info "Computing a-priori error for $(m.name), grid $ig" - testset = device(io_test[ig]) - err = create_relerr_prior(m.closure, testset...) 
- e[ig, im] = err(θ_cnn_prior[ig, im]) - end - e -end -clean() - -e_prior - -# # Compute a-posteriori errors - -(; e_nm, e_m) = let - e_nm = zeros(T, length(nles)) - e_m = zeros(T, length(nles), length(models)) - for ig = 1:size(data_test[1].data, 1) - clean() - setup = setups_test[ig] - psolver = psolver_spectral(setup) - data = (; u = device.(data_test[1].data[ig].u), t = data_test[1].t) - nupdate = 2 - @info "Computing a-posteriori error for no-model, grid $ig" - err = - create_relerr_post(; data, setup, psolver, closure_model = nothing, nupdate) - e_nm[ig] = err(nothing) - for (im, m) in enumerate(models) - @info "Computing a-posteriori error for $(m.name), grid $ig" - err = create_relerr_post(; - data, - setup, - psolver, - closure_model = wrappedclosure(m.closure, setup), - nupdate, - ) - e_m[ig, im] = err(θ_cnn_prior[ig, im]) - end - end - (; e_nm, e_m) -end -clean() - -e_nm -e_m - -# ### Compute symmetry errors - -# A-priori errors -e_symm_prior = let - e = zeros(T, length(nles), length(models)) - for (im, m) in enumerate(models), ig = 1:length(nles) - @info "Computing a-priori equivariance error for $(m.name), grid $ig" - setup = setups_test[ig] - setup = (; setup..., closure_model = wrappedclosure(m.closure, setup)) - err = - create_relerr_symmetry_prior(; u = device.(data_test[1].data[ig].u), setup) - e[ig, im] = err(θ_cnn_prior[ig, im]) - end - e -end -clean() - -e_symm_prior - -# A-posteriori errors -e_symm_post = let - e = zeros(T, length(nles), length(models)) - for (im, m) in enumerate(models), ig = 1:size(data_test[1].data, 1) - @info "Computing a-posteriori equivariance error for $(m.name), grid $ig" - setup = setups_test[ig] - setup = (; setup..., closure_model = wrappedclosure(m.closure, setup)) - err = create_relerr_symmetry_post(; - u = device.(data_test[1].data[ig].u[1]), - setup, - psolver = psolver_spectral(setup), - Δt = (data_test[1].t[2] - data_test[1].t[1]) / 2, - nstep = 10, # length(data_test[1].t) - 1, - g = 1, - ) - e[ig, im] = err(θ_cnn_prior[ig, im]) - end - e -end -clean() - -e_symm_post - -# ### Plot errors - -let - for (e, title, filename) in [ - (e_prior, "A-priori error", "error_prior.pdf"), - (e_m, "A-posteriori error", "error_post.pdf"), - (e_symm_prior, "A-priori equivariance error", "error_symm_prior.pdf"), - (e_symm_post, "A-posteriori equivariance error", "error_symm_post.pdf"), - ] - fig = Figure() - ax = Axis( - fig[1, 1]; - xscale = log10, - yscale = log10, - xticks = nles, - xlabel = "LES grid size", - ylabel = "Relative error", - title, - ) - markers = [:circle, :utriangle, :rect, :diamond] - for (i, m) in enumerate(models) - scatterlines!(ax, nles, e[:, i]; marker = markers[i], label = m.name) - end - axislegend(ax) - display(fig) - save(joinpath(plotdir, filename), fig) - end -end diff --git a/lib/SymmetryClosure/test_dns.jl b/lib/SymmetryClosure/test_dns.jl deleted file mode 100644 index d2250035d..000000000 --- a/lib/SymmetryClosure/test_dns.jl +++ /dev/null @@ -1,153 +0,0 @@ -using IncompressibleNavierStokes -using GLMakie -using CairoMakie -using NeuralClosure -using SymmetryClosure -using LinearAlgebra - -T = Float64 -backend = CPU() -n = 64 -setup = Setup(; - x = (LinRange(T(0), T(1), n + 1), LinRange(T(0), T(1), n + 1)), - Re = T(2000), - backend, -); -psolver = psolver_spectral(setup); -ustart = random_field(setup, T(0); psolver); - -state, _ = solve_unsteady(; - setup, - ustart, - tlims = (T(0), T(0.5)), - processors = ( - # rtp = realtimeplotter(; setup, nupdate = 10), - log = timelogger(; nupdate = 100), - ), -); - -ustart_rot 
= rot2stag(ustart, 1); - -state_rot, _ = solve_unsteady(; - setup, - ustart = ustart_rot, - tlims = (T(0), T(0.5)), - processors = ( - # rtp = realtimeplotter(; setup, nupdate = 10), - log = timelogger(; nupdate = 100), - ), -); - -let - u, v = rot2(state.u, 1) - u, v = state_rot.u - x = setup.grid.xp[1][2:end-1] - y = setup.grid.xp[1][2:end-1] - ux = state.u[1][setup.grid.Iu[1]] - uy = state.u[2][setup.grid.Iu[2]] - n = sqrt.(ux .^ 2 .+ uy .^ 2) - arrows( - x, - y, - ux, - uy; - # arrowsize = vec(n), - arrowsize = 10, - arrowcolor = vec(n), - linecolor = vec(n), - lengthscale = 0.04, - figure = (; size = (500, 500)), - ) -end - -let - F = IncompressibleNavierStokes.momentum(ustart, nothing, T(0), setup) - FR = IncompressibleNavierStokes.momentum(ustart_rot, nothing, T(0), setup) - IncompressibleNavierStokes.apply_bc_u!(F, T(0), setup) - RF = rot2stag(F, 1) - IncompressibleNavierStokes.apply_bc_u!(RF, T(0), setup) - i = 2 - # a = RF[i] - # b = FR[i] - a = RF[i][setup.grid.Iu[i]] - b = FR[i][setup.grid.Iu[i]] - norm(a - b) / norm(b) - a - b -end - -using Random -using NeuralClosure - -closure, θ₀ = gcnn(; - setup, - radii = [2, 2, 2, 2, 2], - channels = [6, 6, 6, 6, 1], - activations = [tanh, tanh, tanh, tanh, identity], - use_bias = [true, true, true, true, false], - rng = Xoshiro(), -); - -closure.chain - -u = ustart -v = (u[1][2:end-1, 2:end-1], u[2][2:end-1, 2:end-1]) -rv = rot2stag(v, 1) -w = cat(stack(v); dims = 4) -rw = cat(stack(rv); dims = 4) -c = closure(w, θ₀) -cr = closure(rw, θ₀) -rc = rot2stag((c[:, :, 1, 1], c[:, :, 2, 1]), 1) - -cr[:, :, 1, 1] |> heatmap -rc[1] |> heatmap - -m = wrappedclosure(closure, setup) - -rm = rot2stag(m(state.u, θ₀), 1) -mr = m(rot2stag(state.u, 1), θ₀) - -rm[1] -mr[1] - -ran = 0.4 -heatmap(rm[2]; colorrange = (-ran, ran)) -heatmap(mr[2]; colorrange = (-ran, ran)) - -a = fill(10, 5) .+ (1:5)' -b = (1:5) .* (1:5)' -x = rot2((a, b), 1) -x[1] -x[2] - -x = LinRange(0, 2pi, 20) -y = LinRange(0, 3pi, 20) -u = @. sin(x) * cos(y') * (x < pi) * (y' > pi) -v = @. 
-cos(x) * sin(y') * (x < pi) * (y' > pi) -strength = sqrt.(u .^ 2 .+ v .^ 2) - -v - -arrows( - x, - y, - u, - v; - arrowsize = 10, - lengthscale = 0.3, - arrowcolor = vec(strength), - linecolor = vec(strength), - figure = (; size = (600, 600)), -) - -g = 3 -arrows( - y, - x, - # x, y, - rot2((u, v), g)...; - arrowsize = 10, - lengthscale = 0.3, - arrowcolor = vec(rot2(strength, g)), - linecolor = vec(rot2(strength, g)), - figure = (; size = (600, 600)), -) diff --git a/lib/SymmetryClosure/test_equivariance.jl b/lib/SymmetryClosure/test_equivariance.jl deleted file mode 100644 index a2df5a2ee..000000000 --- a/lib/SymmetryClosure/test_equivariance.jl +++ /dev/null @@ -1,39 +0,0 @@ -using IncompressibleNavierStokes -using NeuralClosure -using NNlib -using Lux -using Random - -T = Float64 -rng = Xoshiro() - -init_bias = glorot_normal -# init_bias = zeros32 -gcnn = Chain( - GroupConv2D((3, 3), 1 => 5, tanh; init_bias, islifting = true), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 1; use_bias = false, isprojecting = true), -) - -params, state = Lux.setup(rng, gcnn) |> f64 - -n = 100 -ux = randn(T, n, n, 1, 1) -uy = randn(T, n, n, 1, 1) -u = (ux, uy) -ru = rot2(u, 1) -u = cat(u...; dims = 3) -ru = cat(ru...; dims = 3) - -c, _ = gcnn(u, params, state) -cr, _ = gcnn(ru, params, state) -c = (c[:, :, 1, 1], c[:, :, 2, 1]) -cr = (cr[:, :, 1, 1], cr[:, :, 2, 1]) -rc = rot2(c, 1) - -using LinearAlgebra -norm(cr[1] - rc[1]) / norm(rc[1]) -norm(cr[2] - rc[2]) / norm(rc[2]) diff --git a/lib/SymmetryClosure/test_tensor.jl b/lib/SymmetryClosure/test_tensor.jl deleted file mode 100644 index 9efd16347..000000000 --- a/lib/SymmetryClosure/test_tensor.jl +++ /dev/null @@ -1,161 +0,0 @@ -if false - include("src/SymmetryClosure.jl") - using .SymmetryClosure -end - -# using GLMakie -using CairoMakie -using IncompressibleNavierStokes -using SymmetryClosure -using CUDA -using Zygote -using Random -using LinearAlgebra -using NeuralClosure - -lines(cumsum(randn(100))) - -# Setup -n = 128 -ax = range(0.0, 1.0, n + 1) -x = ax, ax -# x = tanh_grid(0.0, 1.0, n + 1), stretched_grid(-0.2, 1.2, n + 1) -setup = Setup(; - x, - Re = 1e4, - backend = CUDABackend(), - # boundary_conditions = ((DirichletBC(), DirichletBC()), (DirichletBC(), DirichletBC())), -); -ustart = random_field(setup) -# ustart = vectorfield(setup) |> randn! - -u = ustart - -let - B, V = tensorbasis(u, setup) - # B, V = randn!(B), randn!(V) - V = randn!(V) - function f(u) - Bi, Vi = tensorbasis(u, setup) - # dot(Bi, B) + dot(Vi, V) - # dot(getindex.(Bi, 1), getindex.(B, 1)) + dot(Vi, V) - dot(Vi, V) - # dot(Vi[:, :, 1], V[:, :, 1]) - end - - fd = map(eachindex(u)) do i - h = 1e-2 - v1 = copy(u) - v2 = copy(u) - CUDA.@allowscalar v1[i] -= h / 2 - CUDA.@allowscalar v2[i] += h / 2 - (f(v2) - f(v1)) / h - end |> x -> reshape(x, size(u)) - - ad = Zygote.gradient(f, u)[1] |> Array - - # mask = @. 
abs(fd - ad) > 1e-3 - - # i = 1 - # V[:, :, i] |> display - # # (mask .* u)[:, :, i] |> display - # (mask .* fd)[:, :, i] |> display - # (mask .* ad)[:, :, i] |> display - - # fd .- ad |> display - @show fd - ad .|> abs |> maximum - # @show f(u) - nothing -end - -urot = NeuralClosure.rot2stag(u, 1) -B, V = tensorbasis(u, setup) -Brot, Vrot = tensorbasis(urot, setup) - -iV = 2 -# V[:, :, iV] |> Array |> heatmap -Vrot[:, :, iV] |> Array |> heatmap -rot2(V, 1)[:, :, iV] |> Array |> heatmap - -Vrot - rot2(V, 1) .|> abs |> maximum - -begin - θ = randn(5, 3) - # θ[:, 1] .= 0 - # θ[:, 3] .= 0 - # θ = zeros(5, 3) - # θ[3, :] .= 1e-3 * randn() - θ = θ |> CuArray - s = tensorclosure(polynomial, u, θ, setup) |> u -> apply_bc_u(u, zero(eltype(u)), setup) - srot = - tensorclosure(polynomial, urot, θ, setup) |> - u -> apply_bc_u(u, zero(eltype(u)), setup) - rots = NeuralClosure.rot2stag(s, 1) - (srot-rots)[2:end-1, 2:end-1, :] .|> abs |> maximum -end - -i = 1 -srot[2:end-1, 2:end-1, i] |> Array |> heatmap -rots[2:end-1, 2:end-1, i] |> Array |> heatmap -# s[2:end-1, 2:end-1, 2] |> Array |> heatmap -(srot-rots)[2:end-1, 2:end-1, i] |> Array |> heatmap -(srot-rots)[2:end-1, 2:end-1, :] .|> abs |> maximum -he = (srot-rots)[2:end-1, 2:end-1, :] -he[125:128, 1:10, 1] - -x = randn(5, 5, 2) - -typeof(B) -getindex.(B, 1) - -B[:, :, 1] - -tb, pb = SymmetryClosure.ChainRulesCore.rrule(tensorbasis, u, setup) - -ubar = pb(tb)[2] - -θ = 1e-5 * randn(5, 3) -θ = θ |> CuArray -s = tensorclosure(polynomial, ustart, θ, setup); - -gradient(u -> sum(tensorclosure(polynomial, u, θ, setup)), ustart)[1]; - -using StaticArrays -tau = similar(setup.grid.x[1], SMatrix{2,2,Float64,4}, 10) -zero(tau) - -s[2:end-1, 2:end-1, 1] |> heatmap -s[2:end-1, 2:end-1, 1] .|> abs |> heatmap -s[2:end-1, 2:end-1, 1] .|> abs .|> log |> heatmap - -B |> length -B[2][5, 10] -getindex.(B[2], 2) - -u[:, :, 1] |> heatmap -getindex.(B[3], 1, 1) |> heatmap -getindex.(B[3], 2, 1) |> heatmap -getindex.(B[3], 2, 2) |> heatmap -V[1][2:end-1, 2:end-1] |> heatmap -V[2][2:end-1, 2:end-1] |> heatmap - -# Solve unsteady problem -state, outputs = solve_unsteady(; - setup, - ustart, - tlims = (0.0, 1.0), - processors = (log = timelogger(; nupdate = 10),), -); -(; u) = state - -makeplot(u, fieldname, time) = save( - "output/fieldplots/$fieldname$time.png", - fieldplot((; u, t = T(0)); setup, fieldname, docolorbar = false, size = (500, 500)), -) - -makeplot(ustart, :vorticity, 0) -makeplot(ustart, :S2, 0) -makeplot(ustart, :R2, 0) -makeplot(u, :vorticity, 1) -makeplot(u, :S2, 1) -makeplot(u, :R2, 1) diff --git a/lib/SymmetryClosure/test_zygote.jl b/lib/SymmetryClosure/test_zygote.jl deleted file mode 100644 index 8af5f48a8..000000000 --- a/lib/SymmetryClosure/test_zygote.jl +++ /dev/null @@ -1,38 +0,0 @@ -using IncompressibleNavierStokes -using NeuralClosure -using Lux -using Random -using Zygote -using ComponentArrays - -T = Float32 -rng = Xoshiro() - -init_bias = glorot_normal -c = Chain( - GroupConv2D((3, 3), 1 => 5, tanh; init_bias, islifting = true), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 5, tanh; init_bias), - GroupConv2D((3, 3), 5 => 1; use_bias = false, isprojecting = true), -) - -# c = GroupConv2D((3, 3), 5 => 5, tanh; init_bias) - -params, state = Lux.setup(rng, c) - -Lux.parameterlength(c) - -n = 100 -ux = randn(T, n, n, 1, 1) -uy = randn(T, n, n, 1, 1) -u = (ux, uy) -u = cat(u...; dims = 3) - -c(u, params, state) - -sum(abs2, 
c(u, params, state)[1]) - -gradient(u -> sum(abs2, c(u, params, state)[1]), u) -gradient(p -> sum(abs2, c(u, p, state)[1]), params) diff --git a/lib/SymmetryClosure/train_models.jl b/lib/SymmetryClosure/train_models.jl deleted file mode 100644 index 12bdf1158..000000000 --- a/lib/SymmetryClosure/train_models.jl +++ /dev/null @@ -1,122 +0,0 @@ -# LSP hack (to get "go to definition" etc. working) #src -if false #src - include("src/SymmetryClosure.jl") #src - include("../NeuralClosure/src/NeuralClosure.jl") #src - include("../../src/IncompressibleNavierStokes.jl") #src - using .SymmetryClosure #src - using .NeuralClosure #src - using .IncompressibleNavierStokes #src -end #src - -########################################################################## #src - -# # Train models -# -# This script is used to train models. - -@info "# Train models" - -########################################################################## #src - -# ## Packages - -@info "Loading packages" -flush(stdout) - -using Accessors -# using CairoMakie -using IncompressibleNavierStokes -using Lux -using LuxCUDA -using Optimisers -using ParameterSchedulers -using NeuralClosure -using Random -using SymmetryClosure -using WGLMakie -using Zygote - -########################################################################## #src - -# ## Setup - -# Get SLURM specific variables (if any) -(; jobid, taskid) = slurm_vars() - -# Log info about current run -time_info() - -# Hardware selection -(; backend, device, clean) = hardware() - -# Test case -(; params, outdir, plotdir, seed_dns, ntrajectory) = testcase(backend) - -# DNS seeds -dns_seeds = splitseed(seed_dns, ntrajectory) -dns_seeds_test = dns_seeds[1:1] -dns_seeds_valid = dns_seeds[2:2] -dns_seeds_train = dns_seeds[3:end] - -########################################################################## #src - -# ## Model definitions - -setups = map(nles -> getsetup(; params, nles), params.nles); - -tensorcoeffs, θ_start = polynomial, zeros(5, 3) -tensorcoeffs, θ_start = create_cnn(; - setup = setups[1], - radii = [0, 0, 0, 0], # Pointwise network - channels = [24, 24, 24, 3], - activations = [tanh, tanh, tanh, identity], - use_bias = [true, true, true, false], - rng = Xoshiro(123), -); -tensorcoeffs.m.chain -closure_models = map(s -> tensorclosure(tensorcoeffs, s), setups) - -# Test run -let - data_test = map( - s -> namedtupleload(getdatafile(outdir, params.nles[1], params.filters[1], s)), - dns_seeds_test, - ) - @show size(data_test[1].u) - sample = data_test[1].u[:, :, :, 1] |> device - θ = θ_start |> device # |> randn! 
|> x -> x ./ 1e6 - closure_models[1](sample, θ) - gradient(θ -> sum(closure_models[1](sample, θ)), θ)[1] -end - -# Train -let - T = typeof(params.Re) - l0 = T(1e-4) - l1 = T(1e-6) - nepoch = 10 - trainpost(; - params, - outdir, - plotdir, - taskid, - postseed = 123, - dns_seeds_train, - dns_seeds_valid, - nsubstep = 5, - nunroll = 5, - ntrajectory = 3, - closure_models, - θ_start, - opt = Adam(l0), - λ = T(5e-8), - scheduler = CosAnneal(; l0, l1, period = nepoch), - nunroll_valid = 50, - nupdate_callback = 10, - displayref = false, - displayupdates = false, - loadcheckpoint = false, - nepoch, - niter = 100, - ) -end From 790945b4509cc46c92dd66830bb14c0ae9dff764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:04:13 +0100 Subject: [PATCH 3/6] ci: remove excised subdirs --- .github/workflows/CompatHelper.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml index 520ee94ff..971bf5bfd 100644 --- a/.github/workflows/CompatHelper.yml +++ b/.github/workflows/CompatHelper.yml @@ -27,7 +27,5 @@ jobs: "examples", "lib/NeuralClosure", "lib/NeuralClosure/test", - "lib/PaperDC", - "lib/SymmetryClosure", ], ) From 549fce022145cdec778967a24a1c65ac86b38447 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:07:08 +0100 Subject: [PATCH 4/6] chore: remove old development scripts --- .tools/README.md | 3 --- .tools/develop.jl | 23 ----------------------- .tools/servedocs.jl | 22 ---------------------- .tools/servevitepress.jl | 13 ------------- .tools/update.jl | 20 -------------------- docs/README.md | 26 +++----------------------- 6 files changed, 3 insertions(+), 104 deletions(-) delete mode 100644 .tools/README.md delete mode 100644 .tools/develop.jl delete mode 100644 .tools/servedocs.jl delete mode 100644 .tools/servevitepress.jl delete mode 100644 .tools/update.jl diff --git a/.tools/README.md b/.tools/README.md deleted file mode 100644 index 3a67647bd..000000000 --- a/.tools/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Tools - -Development tools for IncompressibleNavierStokes.jl. 
diff --git a/.tools/develop.jl b/.tools/develop.jl deleted file mode 100644 index 48291deab..000000000 --- a/.tools/develop.jl +++ /dev/null @@ -1,23 +0,0 @@ -# Develop all environments in the project - -using Pkg - -envs = ( - ".", - "lib/NeuralClosure", - "lib/NeuralClosure/test", - "lib/PaperDC", - "lib/SymmetryClosure", - "lib/SciMLCompat", - "test", - "examples", - "docs", -) - -root = joinpath(@__DIR__, "..") - -for e in envs - e = joinpath(root, e) - Pkg.activate(e) - Pkg.instantiate() -end diff --git a/.tools/servedocs.jl b/.tools/servedocs.jl deleted file mode 100644 index 901f5c7be..000000000 --- a/.tools/servedocs.jl +++ /dev/null @@ -1,22 +0,0 @@ -# Build the documentation and rebuild on changes - -root = joinpath(@__DIR__, "..") -docs = joinpath(root, "docs") - -using Pkg -Pkg.activate(docs) - -# Add live server environment -push!(LOAD_PATH, "@LiveServer") - -using LiveServer - -# Trigger local development modifications -ENV["LOCALDEV"] = true - -# Build documentation and rebuild on changes -servedocs(; - foldername = docs, - literate = joinpath(root, "examples"), - skip_dir = joinpath(docs, "src", "examples", "generated"), -) diff --git a/.tools/servevitepress.jl b/.tools/servevitepress.jl deleted file mode 100644 index c0633467c..000000000 --- a/.tools/servevitepress.jl +++ /dev/null @@ -1,13 +0,0 @@ -# Serve the Vitepress site after building documentation - -using Pkg - -docs = joinpath(@__DIR__, "..", "docs") - -Pkg.activate(docs) - -using DocumenterVitepress -DocumenterVitepress.dev_docs( - joinpath(docs, "build"), - # md_output_path = joinpath(docs, "build/.documenter"), -) diff --git a/.tools/update.jl b/.tools/update.jl deleted file mode 100644 index 474b5ae84..000000000 --- a/.tools/update.jl +++ /dev/null @@ -1,20 +0,0 @@ -# Update all environments in the project - -envs = ( - ".", - "lib/NeuralClosure", - "lib/NeuralClosure/test", - "lib/PaperDC", - "lib/SymmetryClosure", - "lib/SciMLCompat", - "test", - "examples", - "docs", -) - -root = joinpath(@__DIR__, "..") - -for e in envs - e = joinpath(root, e) - run(`julia --project=$e -e 'using Pkg; Pkg.update()'`) -end diff --git a/docs/README.md b/docs/README.md index 362f52a2d..f4d6ef0ca 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,31 +8,11 @@ Documentation for To build the documentation locally, run: ```sh -julia --project=docs docs/make.jl +julia --project=docs -e "using Pkg; Pkg.instantiate()" ``` -## Build documentation with live preview - -Add a [`LiveServer.jl`](https://github.com/tlienart/LiveServer.jl) environment: +and then run ```sh -julia --project=@LiveServer -e 'using Pkg; Pkg.add("LiveServer")' -``` - -Currently, two julia processes are required for local documentation development. -See . - -The script `servedocs.jl` builds the documentation and rebuilds on changes. -The script `servevitepress.jl` serves the vitepress site live. 
- -In two separate shells, run - -```sh -julia .tools/servedocs.jl -``` - -and, once it is done, then run in a second shell - -```sh -julia .tools/servevitepress.jl +julia --project=docs docs/make.jl ``` From 762649a7b9f8ab5a37617d6223c3f45b0e95d39a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:08:32 +0100 Subject: [PATCH 5/6] docs: remove excised examples --- docs/src/examples/index.md | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/docs/src/examples/index.md b/docs/src/examples/index.md index e1c69d901..f9ad5814c 100644 --- a/docs/src/examples/index.md +++ b/docs/src/examples/index.md @@ -118,27 +118,6 @@ const temperature = [ desc: "Convection generated by a temperature gradient between hot and cold fluids", }, ]; - -const neural = [ - { - href: "generated/prioranalysis", - src: "../logo.svg", - caption: "Filter analysis", - desc: "Compare discrete filters and their properties with DNS data", - }, - { - href: "generated/postanalysis", - src: "../logo.svg", - caption: "CNN closure models", - desc: "Compare CNN closure models for different filters, grid sizes, and projection orders", - }, - { - href: "generated/symmetryanalysis", - src: "../logo.svg", - caption: "Symmetry-preserving closure models", - desc: "Train group equivariant CNNs and compare to normal CNNs", - }, -]; ``` @@ -169,9 +148,3 @@ folder. ```@raw html ``` - -## Neural network closure models - -```@raw html - -``` From cb68727006463799b43ab1e99f3249f241f6c9a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Syver=20D=C3=B8ving=20Agdestein?= Date: Tue, 14 Jan 2025 16:11:22 +0100 Subject: [PATCH 6/6] docs: update link in README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4ea46362e..7f9bdf924 100644 --- a/README.md +++ b/README.md @@ -36,8 +36,8 @@ for examples of some typical workflows. More examples can be found in the ## Source code for paper -See [here](./lib/PaperDC) for the source code used in the paper -[Discretize first, filter next: learning divergence-consistent closure models for large-eddy simulation](https://arxiv.org/abs/2403.18088). +See [this repository](https://github.com/agdestein/DivergenceConsistency) for the source code used in the paper +[Discretize first, filter next: learning divergence-consistent closure models for large-eddy simulation](https://www.sciencedirect.com/science/article/pii/S0021999124008258). ## Gallery