Coverage for src/edelweiss/custom_regs.py: 100% (29 statements)

# Copyright (C) 2024 ETH Zurich
# Institute for Particle Physics and Astrophysics
# Author: Silvan Fischbacher
# created: Sat Jan 27 2024

import tensorflow as tf
from sklearn.base import BaseEstimator
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout

from edelweiss.tf_utils import EpochProgressCallback


class NeuralNetworkRegressor(BaseEstimator):
    """
    Neural network regressor based on a Keras Sequential model.

    :param hidden_units: tuple/list, optional (default=(64, 64))
        The number of units per hidden layer
    :param learning_rate: float, optional (default=0.001)
        The learning rate for the Adam optimizer
    :param epochs: int, optional (default=10)
        The number of epochs to train the model
    :param batch_size: int, optional (default=32)
        The batch size for training the model
    :param loss: str, optional (default="mse")
        The loss function to use
    :param activation: str, optional (default="relu")
        The activation function to use for the hidden layers
    :param activation_output: str, optional (default="linear")
        The activation function to use for the output layer
    :param dropout_prob: float, optional (default=0.0)
        The dropout probability applied after the hidden layers
    """

    def __init__(
        self,
        hidden_units=(64, 64),
        learning_rate=0.001,
        epochs=10,
        batch_size=32,
        loss="mse",
        activation="relu",
        activation_output="linear",
        dropout_prob=0.0,
    ):
        self.hidden_units = hidden_units
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.batch_size = batch_size
        self.loss = loss
        self.activation = activation
        self.activation_output = activation_output
        self.dropout_prob = dropout_prob

    def fit(self, X, y, sample_weight=None, early_stopping_patience=10):
        """
        Fit the neural network model.

        :param X: array-like, shape (n_samples, n_features)
            The training input samples
        :param y: array-like, shape (n_samples, n_outputs)
            The target values
        :param sample_weight: array-like, shape (n_samples,), optional (default=None)
            The sample weights
        :param early_stopping_patience: int, optional (default=10)
            The number of epochs with no improvement after which training will be
            stopped
        """

        # create model
        model = tf.keras.Sequential()
        model.add(
            tf.keras.layers.Dense(
                self.hidden_units[0],
                input_dim=X.shape[1],
                activation=self.activation,
            )
        )
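        # remaining hidden layers, each followed by a dropout layer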
        for units in self.hidden_units[1:]:
            model.add(tf.keras.layers.Dense(units, activation=self.activation))
            model.add(Dropout(self.dropout_prob))
        model.add(tf.keras.layers.Dense(y.shape[1], activation=self.activation_output))
        model.summary()

        # compile model
        model.compile(
            loss=self.loss,
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate),
            metrics=["mse"],
            weighted_metrics=["mse"],  # mse evaluated with sample_weight applied
        )

        # fit model
        early_stopping = EarlyStopping(
            monitor="val_loss",
            patience=early_stopping_patience,
            restore_best_weights=True,
        )
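        # train with a 20% validation split; its val_loss drives early stopping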
        model.fit(
            X,
            y,
            sample_weight=sample_weight,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_split=0.2,
            callbacks=[early_stopping, EpochProgressCallback(total_epochs=self.epochs)],
            verbose=0,
        )

        self.model = model

    def predict(self, X):
        """
        Predict the output from the input.

        :param X: the input data
        :return: the predicted output
        """
        return self.model.predict(X, verbose=0)
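

# Minimal usage sketch (illustrative only): the synthetic data, shapes, and
# hyperparameter choices below are assumptions, not part of the edelweiss API.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(42)
    X = rng.normal(size=(1000, 5)).astype("float32")
    # y must be 2D, since the output layer width is taken from y.shape[1]
    y = np.stack([X.sum(axis=1), (X**2).sum(axis=1)], axis=1).astype("float32")

    reg = NeuralNetworkRegressor(hidden_units=(64, 64), epochs=20, dropout_prob=0.1)
    reg.fit(X, y, early_stopping_patience=5)
    y_pred = reg.predict(X)
    print(y_pred.shape)  # -> (1000, 2)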