import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM, Dropout, Dense, Input, MultiHeadAttention, LayerNormalization
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras import regularizers
from tensorflow.keras.constraints import UnitNorm
from tensorflow.keras.initializers import RandomUniform
from colorama import Fore, Style
from sklearn.metrics import mean_squared_error, mean_absolute_error
seed_value = 1
tf.random.set_seed(seed_value)
np.random.seed(seed_value)
dataset_1 = np.array([0.1, 0.4, 0.0, 0.4, 0.1, 0.0, 0.3, 0.1, 0.0, 0.1, 0.7, 0.1, 0.3, 0.1, 0.4, 0.0, 0.0, 0.1, 0.6, 0.0, 0.5, 0.0, 0.1, 0.3, 0.0, 0.0, 0.2, 0.1, 0.2, 0.4, 0.3, 0.1, 0.2, 0.7, 0.2, 0.0, 0.3, 0.7, 0.2, 0.4, 0.0, 0.1, 0.6, 0.3, 0.4, 0.5, 0.0, 0.2, 0.2, 0.0, 0.0, 0.3, 0.0, 0.5, 0.6, 0.7, 0.1, 0.3, 0.3, 0.2, 0.3, 0.2, 0.0, 0.4, 0.3, 0.6, 0.1, 0.1, 0.0, 0.1, 0.0, 0.5, 0.0, 0.2, 0.3, 0.1, 0.0, 0.3, 0.5, 0.2, 0.4, 0.1, 0.0, 0.4, 0.2, 0.3, 0.4, 0.7, 0.0, 0.1, 0.2, 0.2, 0.0, 0.1, 0.2, 0.0, 0.0, 0.3, 0.0, 0.1, 0.1, 0.3, 0.5, 0.0, 0.3, 0.1, 0.3, 0.4, 0.2, 0.1, 0.0, 0.1, 0.3, 0.3, 0.3, 0.0, 0.0, 0.3, 0.7, 0.1, 0.1, 0.5, 0.6, 0.2, 0.3, 0.3, 0.2, 0.2, 0.1, 0.0, 0.1, 0.1, 0.7, 0.0, 0.0, 0.2, 0.2, 0.1, 0.1, 0.4, 0.0, 0.3, 0.0, 0.5, 0.4, 0.2, 0.1, 0.2, 0.1, 0.6, 0.3, 0.2, 0.4, 0.3, 0.2, 0.3, 0.6, 0.5, 0.0, 0.1, 0.6, 0.1, 0.1, 0.0, 0.1, 0.0, 0.5, 0.2, 0.0, 0.2, 0.5, 0.1, 0.0, 0.1, 0.3, 0.3, 0.0, 0.0, 0.0, 0.4, 0.0, 0.1, 0.2, 0.3, 0.6, 0.2, 0.3, 0.2, 0.4, 0.4, 0.1, 0.1, 0.3])
dataset_2 = np.array([0.8, 0.6, 0.5, 0.5, 0.6, 0.3, 0.4, 0.3, 0.1, 0.2, 0.7, 0.2, 0.5, 0.4, 0.5, 0.2, 0.2, 0.7, 0.7, 0.2, 0.6, 0.3, 0.4, 0.4, 0.5, 0.7, 0.4, 0.4, 0.3, 0.7, 0.5, 0.2, 0.5, 0.7, 0.2, 0.4, 0.5, 0.8, 0.3, 0.7, 0.1, 0.2, 0.7, 0.5, 0.6, 0.6, 0.2, 0.4, 0.3, 0.0, 0.2, 0.5, 0.3, 0.8, 0.9, 0.7, 0.5, 0.3, 0.4, 0.8, 0.4, 0.8, 0.6, 0.5, 0.3, 0.6, 0.2, 0.4, 0.2, 0.3, 0.0, 0.7, 0.0, 0.5, 0.4, 0.4, 0.5, 0.3, 0.6, 0.3, 0.6, 0.3, 0.5, 0.5, 0.4, 0.7, 0.6, 0.7, 0.1, 0.4, 0.7, 0.6, 0.4, 0.3, 0.2, 0.3, 0.2, 0.4, 0.1, 0.7, 0.3, 0.5, 0.9, 0.5, 0.7, 0.7, 0.7, 0.7, 0.4, 0.4, 0.1, 0.4, 0.4, 0.5, 0.4, 0.5, 0.0, 0.6, 0.7, 0.2, 0.8, 0.7, 0.7, 0.5, 0.6, 0.6, 0.7, 0.6, 0.3, 0.0, 0.2, 0.4, 0.8, 0.1, 0.0, 0.5, 0.4, 0.4, 0.7, 0.4, 0.4, 0.5, 0.2, 0.5, 0.7, 0.3, 0.6, 0.3, 0.6, 0.7, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.8, 0.6, 0.6, 0.2, 0.6, 0.6, 0.4, 0.0, 0.7, 0.4, 0.6, 0.9, 0.3, 0.8, 0.8, 0.3, 0.1, 0.2, 0.4, 0.6, 0.7, 0.3, 0.3, 0.8, 0.2, 0.6, 0.5, 0.3, 0.8, 0.9, 0.8, 0.6, 0.4, 0.6, 0.5, 0.2, 0.3])
dataset_3 = np.array([0.9, 0.7, 0.6, 0.9, 0.8, 0.4, 0.6, 0.7, 0.6, 0.4, 0.8, 0.8, 0.8, 0.9, 0.8, 0.9, 0.5, 0.9, 0.9, 0.8, 0.7, 0.9, 0.6, 0.4, 0.9, 0.7, 0.6, 0.5, 0.5, 0.9, 0.5, 0.7, 0.6, 0.7, 0.3, 0.6, 0.9, 0.8, 0.8, 0.7, 0.6, 0.4, 0.7, 0.7, 0.9, 0.9, 0.7, 0.4, 0.6, 0.4, 0.7, 0.8, 0.7, 0.9, 0.9, 0.9, 0.6, 0.9, 0.9, 0.9, 0.8, 0.9, 0.8, 0.5, 0.5, 0.6, 0.5, 0.7, 0.9, 0.9, 0.6, 0.9, 0.3, 0.9, 0.7, 0.8, 0.9, 0.6, 0.7, 0.3, 0.6, 0.8, 0.8, 0.9, 0.5, 0.9, 0.9, 0.8, 0.4, 0.4, 0.9, 0.7, 0.6, 0.8, 0.8, 0.7, 0.7, 0.7, 0.6, 0.8, 0.9, 0.9, 0.9, 0.8, 0.9, 0.8, 0.8, 0.9, 0.8, 0.7, 0.8, 0.9, 0.5, 0.6, 0.6, 0.9, 0.6, 0.6, 0.9, 0.2, 0.9, 0.8, 0.8, 0.9, 0.7, 0.9, 0.8, 0.6, 0.7, 0.1, 0.9, 0.7, 0.8, 0.3, 0.6, 0.8, 0.9, 0.7, 0.9, 0.8, 0.8, 0.7, 0.5, 0.6, 0.7, 0.9, 0.6, 0.5, 0.9, 0.7, 0.5, 0.9, 0.8, 0.9, 0.9, 0.9, 0.9, 0.9, 0.7, 0.7, 0.8, 0.7, 0.8, 0.6, 0.7, 0.9, 0.7, 0.9, 0.4, 0.9, 0.9, 0.9, 0.5, 0.9, 0.9, 0.8, 0.7, 0.6, 0.9, 0.8, 0.9, 0.7, 0.6, 0.6, 0.9, 0.9, 0.8, 0.8, 0.5, 0.9, 0.6, 0.5, 0.5])
def prepare_data_with_lags(dataset, lag, look_back=1, features=1, outputs=1):
    # Build sliding windows of length look_back + lag; the target is the value
    # that immediately follows each window.
    dataset = dataset.reshape(-1, 1)
    input_data_X = []
    output_data_y = []
    for i in range(len(dataset) - look_back - lag):
        a = dataset[i:(i + look_back + lag), :features]
        input_data_X.append(a)
        output_data_y.append(dataset[i + look_back + lag, :outputs])
    return np.array(input_data_X), np.array(output_data_y)
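# Quick illustrative shape check (a hypothetical example, not part of the pipeline):
# with the default look_back=1 and lag=3, each window holds 4 consecutive values
# and the target is the value that follows it.
_demo_X, _demo_y = prepare_data_with_lags(np.arange(10, dtype=float), lag=3)
assert _demo_X.shape == (6, 4, 1) and _demo_y.shape == (6, 1)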
best_lag_1 = 3
best_lag_2 = 3
best_lag_3 = 3
input_data_X_1, output_data_y_1 = prepare_data_with_lags(dataset_1, best_lag_1)
input_data_X_2, output_data_y_2 = prepare_data_with_lags(dataset_2, best_lag_2)
input_data_X_3, output_data_y_3 = prepare_data_with_lags(dataset_3, best_lag_3)
split_ratio = 0.65
split_index_1 = int(len(input_data_X_1) * split_ratio)
split_index_2 = int(len(input_data_X_2) * split_ratio)
split_index_3 = int(len(input_data_X_3) * split_ratio)
train_X_1, val_X_1 = input_data_X_1[:split_index_1], input_data_X_1[split_index_1:]
train_Y_1, val_Y_1 = output_data_y_1[:split_index_1], output_data_y_1[split_index_1:]
train_X_2, val_X_2 = input_data_X_2[:split_index_2], input_data_X_2[split_index_2:]
train_Y_2, val_Y_2 = output_data_y_2[:split_index_2], output_data_y_2[split_index_2:]
train_X_3, val_X_3 = input_data_X_3[:split_index_3], input_data_X_3[split_index_3:]
train_Y_3, val_Y_3 = output_data_y_3[:split_index_3], output_data_y_3[split_index_3:]
def create_tf_dataset(X, y, batch_size=1):
    # Wrap the arrays in a shuffled, batched, prefetched tf.data pipeline.
    dataset = tf.data.Dataset.from_tensor_slices((X, y))
    dataset = dataset.cache().shuffle(buffer_size=len(X)).batch(batch_size).prefetch(buffer_size=tf.data.AUTOTUNE)
    return dataset
train_dataset_1 = create_tf_dataset(train_X_1, train_Y_1)
val_dataset_1 = create_tf_dataset(val_X_1, val_Y_1)
train_dataset_2 = create_tf_dataset(train_X_2, train_Y_2)
val_dataset_2 = create_tf_dataset(val_X_2, val_Y_2)
train_dataset_3 = create_tf_dataset(train_X_3, train_Y_3)
val_dataset_3 = create_tf_dataset(val_X_3, val_Y_3)
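# Note: the tf.data pipelines above are never passed to fit(); training below feeds the
# NumPy arrays directly because the model takes two inputs. A minimal sketch of a
# two-input pipeline, should one be preferred (an assumption, mirroring the
# "feed the same window twice" scheme used in the fit() calls):
def create_two_input_dataset(X, y, batch_size=1):
    dataset = tf.data.Dataset.from_tensor_slices(((X, X), y))
    return dataset.cache().shuffle(buffer_size=len(X)).batch(batch_size).prefetch(tf.data.AUTOTUNE)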
def build_model(input_shape):
    # All six LSTM layers share the same configuration, so define it once and
    # reuse it; each LSTM(**lstm_kwargs) call below still creates a separate
    # layer with its own weights.
    reg = regularizers.l1_l2(l1=1e-8, l2=1e-8)
    lstm_kwargs = dict(
        units=32,
        return_sequences=True,
        return_state=True,
        activation='tanh',
        recurrent_activation='sigmoid',
        use_bias=True,
        unit_forget_bias=True,
        go_backwards=False,
        stateful=False,
        unroll=False,
        implementation=2,
        bias_constraint=UnitNorm(axis=0),
        kernel_constraint=UnitNorm(axis=0),
        recurrent_constraint=UnitNorm(axis=0),
        bias_initializer=RandomUniform(minval=-0.1, maxval=0.1),
        kernel_initializer=RandomUniform(minval=-0.1, maxval=0.1),
        recurrent_initializer=RandomUniform(minval=-0.1, maxval=0.1),
        bias_regularizer=reg,
        kernel_regularizer=reg,
        activity_regularizer=reg,
        recurrent_regularizer=reg,
        recurrent_dropout=0.1,
    )

    # Encoder: three stacked LSTM layers, each followed by LayerNormalization and Dropout.
    encoder_inputs = Input(shape=input_shape)
    encoder_lstm1, state_h1, state_c1 = LSTM(**lstm_kwargs)(encoder_inputs)
    encoder_lstm1 = LayerNormalization()(encoder_lstm1)
    encoder_lstm1 = Dropout(0.1)(encoder_lstm1)
    encoder_lstm2, state_h2, state_c2 = LSTM(**lstm_kwargs)(encoder_lstm1)
    encoder_lstm2 = LayerNormalization()(encoder_lstm2)
    encoder_lstm2 = Dropout(0.1)(encoder_lstm2)
    encoder_lstm3, state_h3, state_c3 = LSTM(**lstm_kwargs)(encoder_lstm2)
    encoder_lstm3 = LayerNormalization()(encoder_lstm3)
    encoder_lstm3 = Dropout(0.1)(encoder_lstm3)
    encoder_states = [state_h3, state_c3]

    # Decoder: three stacked LSTM layers, the first initialised with the final encoder state.
    decoder_inputs = Input(shape=input_shape)
    decoder_lstm1, _, _ = LSTM(**lstm_kwargs)(decoder_inputs, initial_state=encoder_states)
    decoder_lstm1 = LayerNormalization()(decoder_lstm1)
    decoder_lstm1 = Dropout(0.1)(decoder_lstm1)
    decoder_lstm2, _, _ = LSTM(**lstm_kwargs)(decoder_lstm1)
    decoder_lstm2 = LayerNormalization()(decoder_lstm2)
    decoder_lstm2 = Dropout(0.1)(decoder_lstm2)
    decoder_lstm3, _, _ = LSTM(**lstm_kwargs)(decoder_lstm2)
    decoder_lstm3 = LayerNormalization()(decoder_lstm3)
    decoder_lstm3 = Dropout(0.1)(decoder_lstm3)

    # Multi-head attention with the encoder sequence as queries and the decoder
    # sequence as keys/values, followed by a residual connection to the decoder
    # output and layer normalisation.
    attention_output = MultiHeadAttention(num_heads=10, key_dim=10)(query=encoder_lstm3, value=decoder_lstm3)
    attention_output = LayerNormalization()(attention_output + decoder_lstm3)

    outputs = Dense(1,
                    activation='tanh',
                    use_bias=True,
                    bias_initializer=RandomUniform(minval=-0.1, maxval=0.1),
                    kernel_initializer=RandomUniform(minval=-0.1, maxval=0.1),
                    bias_constraint=UnitNorm(axis=0),
                    kernel_constraint=UnitNorm(axis=0),
                    bias_regularizer=reg,
                    activity_regularizer=reg,
                    kernel_regularizer=reg)(attention_output)

    model = Model([encoder_inputs, decoder_inputs], outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, clipvalue=1.0),
                  loss='mean_squared_error',
                  metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.MeanAbsoluteError()])
    return model
model_1 = build_model((train_X_1.shape[1], train_X_1.shape[2]))
model_2 = build_model((train_X_2.shape[1], train_X_2.shape[2]))
model_3 = build_model((train_X_3.shape[1], train_X_3.shape[2]))
early_stopping = EarlyStopping(monitor='val_loss', patience=30, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=20, factor=0.1, min_lr=1e-7)
# Note: all three fit() calls below share this checkpoint, so each run overwrites
# best_model.weights.h5 and only the last model's best weights remain on disk.
model_checkpoint = ModelCheckpoint("best_model.weights.h5", monitor='val_loss', save_best_only=True, save_weights_only=True)
callbacks = [early_stopping, reduce_lr, model_checkpoint]
model_1.fit([train_X_1, train_X_1], train_Y_1, epochs=100, batch_size=1, validation_data=([val_X_1, val_X_1], val_Y_1),
callbacks=callbacks)
tf.keras.backend.clear_session()
model_2.fit([train_X_2, train_X_2], train_Y_2, epochs=100, batch_size=1, validation_data=([val_X_2, val_X_2], val_Y_2), callbacks=callbacks)
tf.keras.backend.clear_session()
model_3.fit([train_X_3, train_X_3], train_Y_3, epochs=100, batch_size=1, validation_data=([val_X_3, val_X_3], val_Y_3), callbacks=callbacks)
tf.keras.backend.clear_session()
predictions_1 = model_1.predict([val_X_1, val_X_1]).flatten()
predictions_2 = model_2.predict([val_X_2, val_X_2]).flatten()
predictions_3 = model_3.predict([val_X_3, val_X_3]).flatten()
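# Validation-set error metrics (a minimal sketch using the sklearn imports above;
# it takes the first output timestep of each window as that window's prediction,
# matching how next_prediction_*[0] is used further below).
for name, model, vX, vY in [("dataset 1", model_1, val_X_1, val_Y_1),
                            ("dataset 2", model_2, val_X_2, val_Y_2),
                            ("dataset 3", model_3, val_X_3, val_Y_3)]:
    val_pred = model.predict([vX, vX], verbose=0)[:, 0, 0]
    print(f"{name}: val MSE={mean_squared_error(vY.flatten(), val_pred):.4f}, "
          f"val MAE={mean_absolute_error(vY.flatten(), val_pred):.4f}")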
# The models were trained on windows of length look_back + lag (1 + 3 = 4 values),
# so the forecast inputs must use that same window length.
window_1 = best_lag_1 + 1
last_value_1 = dataset_1[-window_1:].reshape(1, window_1, 1)
next_prediction_1 = model_1.predict([last_value_1, last_value_1]).flatten()
next_prediction_1 = np.clip(next_prediction_1, 0.00, 0.9)
window_2 = best_lag_2 + 1
last_value_2 = dataset_2[-window_2:].reshape(1, window_2, 1)
next_prediction_2 = model_2.predict([last_value_2, last_value_2]).flatten()
next_prediction_2 = np.clip(next_prediction_2, 0.00, 0.9)
window_3 = best_lag_3 + 1
last_value_3 = dataset_3[-window_3:].reshape(1, window_3, 1)
next_prediction_3 = model_3.predict([last_value_3, last_value_3]).flatten()
next_prediction_3 = np.clip(next_prediction_3, 0.00, 0.9)
all_predictions = [
round(next_prediction_1[0] * 10),
round(next_prediction_2[0] * 10),
round(next_prediction_3[0] * 10),
]
unique_sorted_predictions = sorted(set(all_predictions))
last_values = ", ".join(map(str, [round(dataset_1[-1], 2), round(dataset_2[-1], 2), round(dataset_3[-1], 2),]))
print(f"\n{Fore.RED}Πρόβλεψη επόμενης τιμής για το dataset 1: {Style.RESET_ALL}{next_prediction_1[0]:.1f}")
print(f"\n{Fore.BLUE}Πρόβλεψη επόμενης τιμής για το dataset 2: {Style.RESET_ALL}{next_prediction_2[0]:.1f}")
print(f"\n{Fore.BLUE}Πρόβλεψη επόμενης τιμής για το dataset 3: {Style.RESET_ALL}{next_prediction_3[0]:.1f}")
print(f"\n{Fore.RED}ΤΕΛΕΥΤΑΙΑ ΤΙΜΗ ΑΠΟ ΚΑΘΕ DATASET:{Style.RESET_ALL}{last_values}")
print(f"{Fore.GREEN}ΟΛΕΣ ΟΙ ΠΡΟΒΛΕΨΕΙΣ ΜΑΖΙ:{Style.RESET_ALL}{Fore.BLUE}{next_prediction_1[0]:.1f}, {next_prediction_2[0]:.1f},
{next_prediction_3[0]:.1f},{Style.RESET_ALL}")
print(f"\n{Fore.YELLOW}ΠΡΟΒΛΕΨΗ ΓΙΑ ΤΗΝ ΕΠΟΜΕΝΗ: {Style.RESET_ALL}{Fore.BLACK}{', '.join(f'{int(x * 1)}' for x in unique_sorted_predictions)}{Style.RESET_ALL}")
# Σχεδίαση προβλέψεων
def plot_predictions(true_values, predictions, title):
    plt.figure(figsize=(12, 6))
    plt.plot(true_values, label='True Values', linestyle='--', marker='o')
    plt.plot(predictions, label='Predictions', linestyle='-', marker='x')
    plt.title(title)
    plt.xlabel('Time')
    plt.ylabel('Value')
    plt.legend()
    plt.grid(True)
    plt.show()
# True values for comparison
true_values_1 = dataset_1[-len(predictions_1):]
true_values_2 = dataset_2[-len(predictions_2):]
true_values_3 = dataset_3[-len(predictions_3):]
# Plot the predictions against the true values
plot_predictions(true_values_1, predictions_1, 'Predictions for Dataset 1')
plot_predictions(true_values_2, predictions_2, 'Predictions for Dataset 2')
plot_predictions(true_values_3, predictions_3, 'Predictions for Dataset 3')
def rolling_forecast(model, data, steps, look_back):
    # Iteratively predict `steps` values ahead, feeding each prediction back
    # into the end of the input window.
    predictions = []
    input_seq = data[-look_back:].reshape((1, look_back, 1))
    for _ in range(steps):
        predicted_value = model.predict([input_seq, input_seq], verbose=2)
        predictions.append(round(predicted_value[0, 0].item(), 1))
        input_seq = np.roll(input_seq, -1, axis=1)
        input_seq[0, -1, 0] = predicted_value[0, 0].item()
    return np.array(predictions)
next_steps = 1
# Use the same window length the models were trained on (look_back + lag = 4).
predictions_rolling_1 = rolling_forecast(model_1, dataset_1, next_steps, best_lag_1 + 1)
predictions_rolling_2 = rolling_forecast(model_2, dataset_2, next_steps, best_lag_2 + 1)
predictions_rolling_3 = rolling_forecast(model_3, dataset_3, next_steps, best_lag_3 + 1)
print("Rolling Forecast Predictions for dataset 1:", predictions_rolling_1)
print("Rolling Forecast Predictions for dataset 2:", predictions_rolling_2)
print("Rolling Forecast Predictions for dataset 3:", predictions_rolling_3)
RESULTS
Rolling Forecast Predictions for dataset 1: [0.2]
Rolling Forecast Predictions for dataset 2: [0.5]
Rolling Forecast Predictions for dataset 3: [0.7]