Commit 048d875: speed nn
jchavesmontero committed Aug 27, 2024
1 parent 445e05d commit 048d875
Showing 5 changed files with 94 additions and 154 deletions.
19 changes: 15 additions & 4 deletions lace/emulator/base_emulator.py
@@ -1,11 +1,22 @@
import numpy as np
from abc import ABC, abstractmethod


class BaseEmulator(ABC):
    """Minimum amount of documentation"""

    @abstractmethod
    def emulate_p1d_Mpc(self, model, k_Mpc, return_covar=False, z=None):
        """
        Emulate P1D values for a given set of k values in Mpc units.

        Args:
            model (dict): Dictionary containing the model parameters.
            k_Mpc (np.ndarray): Array of k values in Mpc units.
            return_covar (bool, optional): Whether to return covariance. Defaults to False.
            z (float, optional): Redshift value. Defaults to None.

        Returns:
            np.ndarray: Emulated P1D values.
        """
        pass
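
For reference, a minimal concrete subclass might look like the sketch below. The `PowerLawEmulator` class, its power-law form, and the `amp`/`slope` parameter names are purely illustrative and not part of the package.

import numpy as np

from lace.emulator.base_emulator import BaseEmulator


class PowerLawEmulator(BaseEmulator):
    """Toy subclass used only to illustrate the interface."""

    def emulate_p1d_Mpc(self, model, k_Mpc, return_covar=False, z=None):
        # Hypothetical parameters read from the model dictionary.
        amp = model.get("amp", 1.0)
        slope = model.get("slope", -1.0)
        p1d = amp * np.asarray(k_Mpc) ** slope
        if return_covar:
            # Placeholder diagonal covariance with 10% errors.
            return p1d, np.diag((0.1 * p1d) ** 2)
        return p1d


emu = PowerLawEmulator()
p1d = emu.emulate_p1d_Mpc({"amp": 0.5, "slope": -1.5}, np.linspace(0.1, 3.0, 20))
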
26 changes: 13 additions & 13 deletions lace/emulator/gp_emulator.py
@@ -403,8 +403,8 @@ def _fit_p1d_in_archive(self, deg, kmax_Mpc):
        """
        Fit a polynomial to the logarithm of P1D for each entry in the archive.

        This method fits a polynomial to the logarithm of the power spectrum data (`p1d_Mpc`)
        for each entry in the `training_data` using a polynomial of the specified degree (`deg`),
        over the range of k values up to `kmax_Mpc`.

        :param deg: Degree of the polynomial for fitting.
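
The fit itself can be reproduced with `np.polyfit` on the logarithm of the data. The sketch below is a standalone approximation of the idea; the `fit_log_p1d` helper and the choice of log10 are assumptions, not the package's exact implementation.

import numpy as np


def fit_log_p1d(k_Mpc, p1d_Mpc, deg, kmax_Mpc):
    """Fit a degree-`deg` polynomial to log10(P1D) vs log10(k) for k < kmax_Mpc."""
    mask = (k_Mpc > 0) & (k_Mpc < kmax_Mpc)
    coeffs = np.polyfit(np.log10(k_Mpc[mask]), np.log10(p1d_Mpc[mask]), deg)
    # Recover a smooth P1D from the coefficients.
    p1d_smooth = 10 ** np.polyval(coeffs, np.log10(k_Mpc[mask]))
    return coeffs, p1d_smooth
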
@@ -426,7 +426,7 @@ def _k_bin_sm_p1d_in_archive(self, kmax_Mpc):
        """
        Apply smoothing to P1D data for each entry in the archive.

        This method performs kernel smoothing on the P1D data for each entry in the `training_data`
        using the specified maximum k value (`kmax_Mpc`), and updates each entry with the smoothed data.

        :param kmax_Mpc: Maximum k value in Mpc^-1 for smoothing.
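
A kernel smoother of this kind can be written in a few lines; the version below (Gaussian kernel in log k with a fixed `width`) is only a sketch of the technique, not the routine used in `gp_emulator.py`.

import numpy as np


def kernel_smooth_p1d(k_Mpc, p1d_Mpc, kmax_Mpc, width=0.05):
    """Gaussian-kernel smoothing of P1D in log10(k), restricted to k < kmax_Mpc."""
    mask = (k_Mpc > 0) & (k_Mpc < kmax_Mpc)
    logk = np.log10(k_Mpc[mask])
    # Weight every bin by a Gaussian in log k centred on the target bin.
    weights = np.exp(-0.5 * ((logk[:, None] - logk[None, :]) / width) ** 2)
    return weights @ p1d_Mpc[mask] / weights.sum(axis=1)
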
@@ -451,9 +451,9 @@ def _build_interp(self):
        """
        Build Gaussian Process (GP) models from training data and parameter grid.

        This method constructs Gaussian Process models based on the training data and parameter grid.
        It involves rescaling parameters, normalizing training data, and initializing the GP models.
        Depending on the `emu_per_k` flag, it either builds a separate GP model for each k-bin
        or a single GP model for all k-bins.

        :return: None
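
The per-k-bin construction can be illustrated with any GP library. The sketch below uses scikit-learn rather than the package's own GP backend; the function name and kernel choice are assumptions.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel


def build_gp_per_k(unit_params, norm_p1d):
    """Train one GP per k-bin on parameters rescaled to the unit volume."""
    kernel = ConstantKernel(1.0) * RBF(length_scale=np.ones(unit_params.shape[1]))
    gps = []
    for ik in range(norm_p1d.shape[1]):
        gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gps.append(gp.fit(unit_params, norm_p1d[:, ik]))
    return gps
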
@@ -506,7 +506,7 @@ def _get_param_limits(self, paramGrid):
        """
        Get the minimum and maximum values for each parameter.

        This method computes the minimum and maximum values for each parameter in the provided
        parameter grid (`paramGrid`), which is used for rescaling parameters to a unit volume.

        :param paramGrid: 2D array where each column represents a parameter and each row represents a training point.
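
In plain NumPy this amounts to a column-wise min/max; the helper name below is an assumption.

import numpy as np


def get_param_limits(param_grid):
    """Return an (n_params, 2) array of per-parameter (min, max) limits."""
    param_grid = np.asarray(param_grid)
    return np.column_stack([param_grid.min(axis=0), param_grid.max(axis=0)])
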
@@ -525,8 +525,8 @@ def train(self):
        """
        Train the Gaussian Process (GP) emulator.

        This method initializes and optimizes the Gaussian Process models. If `emu_per_k` is True,
        it trains multiple GP models, one for each k-bin. Otherwise, it trains a single GP model
        for all k-bins.

        :return: None
@@ -554,7 +554,7 @@ def printPriorVolume(self):
        """
        Print the limits for each parameter.

        This method prints the minimum and maximum values for all parameters used in the emulator.
        This provides a way to inspect the parameter space covered by the emulator.

        :return: None
@@ -566,7 +566,7 @@ def return_unit_call(self, model):
        """
        For a given model in dictionary format, return an ordered parameter list with the values rescaled to unit volume.

        This method takes a model dictionary, rescales the parameter values to a unit volume,
        and returns the list of rescaled parameter values.

        :param model: Dictionary containing parameter values with keys as parameter names.
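
The rescaling itself is a linear map onto [0, 1] using the limits above; the helper name and argument layout are illustrative.

import numpy as np


def rescale_to_unit_volume(model, param_names, param_limits):
    """Map an ordered list of model parameters onto the unit hypercube."""
    values = np.array([model[name] for name in param_names])
    lo, hi = param_limits[:, 0], param_limits[:, 1]
    return (values - lo) / (hi - lo)
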
@@ -587,7 +587,7 @@ def check_in_hull(self, model):
        """
        Check if a given model is within the convex hull of the training points.

        This method checks if the rescaled parameter values of a given model are within the
        convex hull of the training points' parameter space.

        :param model: Dictionary containing parameter values with keys as parameter names.
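
One standard way to test this is via a Delaunay triangulation of the training points. The sketch below uses `scipy.spatial` and illustrates the test, not necessarily the package's own implementation.

import numpy as np
from scipy.spatial import Delaunay


def in_convex_hull(point, training_points):
    """True if `point` lies inside the convex hull of `training_points`."""
    hull = Delaunay(np.asarray(training_points))
    return hull.find_simplex(np.atleast_2d(point))[0] >= 0
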
@@ -605,7 +605,7 @@ def predict(self, model, z=None):
        """
        Return P1D or polynomial fit coefficients for a given parameter set.

        This method provides predictions of P1D values or polynomial coefficients based on
        the provided model parameters. It can also return error estimates if required.

        :param model: Dictionary containing parameter values with keys as parameter names.
@@ -738,7 +738,7 @@ def get_nearest_distance(self, model, z=None):
        """
        Get the Euclidean distance to the nearest training point in the rescaled parameter space.

        This method computes the Euclidean distance from the given model parameters to the nearest
        training point in the rescaled parameter space.

        :param model: Dictionary containing parameter values with keys as parameter names.
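
In the rescaled coordinates this reduces to a brute-force Euclidean minimum; the helper below is a sketch.

import numpy as np


def nearest_training_distance(unit_point, unit_training_points):
    """Euclidean distance from `unit_point` to the closest training point."""
    diffs = np.asarray(unit_training_points) - np.asarray(unit_point)
    return np.sqrt((diffs**2).sum(axis=1)).min()
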
12 changes: 9 additions & 3 deletions lace/emulator/nn_architecture.py
@@ -2,6 +2,7 @@
import torch
from torch import nn


class MDNemulator_polyfit(torch.nn.Module):
"""
A neural network model for emulating power spectrum P1D using polynomial fitting.
@@ -12,6 +13,7 @@ class MDNemulator_polyfit(torch.nn.Module):
        max_neurons (int, optional): Maximum number of neurons in any layer. Defaults to 100.
        ninput (int, optional): Number of input features. Defaults to 6.
    """

    def __init__(self, nhidden, ndeg, max_neurons=100, ninput=6):
        super().__init__()
        self.inputlay = torch.nn.Sequential(
@@ -26,10 +28,14 @@ def __init__(self, nhidden, ndeg, max_neurons=100, ninput=6):
        self.hiddenlay = nn.Sequential(*modules)

        self.means = torch.nn.Sequential(
            nn.Linear(max_neurons, 50),
            nn.LeakyReLU(0.5),
            nn.Linear(50, ndeg + 1),
        )
        self.stds = torch.nn.Sequential(
            nn.Linear(max_neurons, 50),
            nn.LeakyReLU(0.5),
            nn.Linear(50, ndeg + 1),
        )

    def forward(self, inp):
@@ -40,7 +46,7 @@ def forward(self, inp):
            inp (torch.Tensor): Input tensor containing features.

        Returns:
            tuple:
                - torch.Tensor: Emulated P1D values.
                - torch.Tensor: Logarithm of the standard deviation of P1D values.
        """