In this section we prepare the dataset so that it is ready for preprocessing.
TensorFlow: package for machine learning / AI
NumPy: package for working with arrays
Pandas: package for working with datasets
Pickle: package for saving variables (serializing Python objects)
# Import the packages that will be used
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
# Aggregating netto_kg
df_aggregated = pd.read_csv('metadata_aggregated.csv') # Read the file
df_aggregated = df_aggregated.groupby(['tanggal', 'kecamatan']).agg( # Sum 'netto_kg' for each unique ('tanggal', 'kecamatan') pair
    {
        'netto_kg': 'sum',
    }
).reset_index()
df_aggregated.head()
|   | tanggal | kecamatan | netto_kg |
|---|---|---|---|
| 0 | 01/01/2020 | Magelang Selatan | 12820 |
| 1 | 01/01/2020 | Magelang Tengah | 6750 |
| 2 | 01/01/2020 | Magelang Utara | 11195 |
| 3 | 01/01/2021 | Magelang Selatan | 18085 |
| 4 | 01/01/2021 | Magelang Tengah | 12510 |
df_aggregated.info() # Display information about the dataframe
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5249 entries, 0 to 5248
Data columns (total 3 columns):
 #   Column     Non-Null Count  Dtype
---  ------     --------------  -----
 0   tanggal    5249 non-null   object
 1   kecamatan  5249 non-null   object
 2   netto_kg   5249 non-null   int64
dtypes: int64(1), object(2)
memory usage: 123.1+ KB
df_aggregated['kecamatan'].value_counts() # Show the number of rows for each 'kecamatan'
Magelang Utara      1771
Magelang Selatan    1767
Magelang Tengah     1711
Name: kecamatan, dtype: int64
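The unequal counts already hint that some dates are missing for certain kecamatan; the date-filling step below handles this.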
# Import packages for handling dates and for normalizing data
from datetime import datetime, timedelta # utilities for working with dates/times
from sklearn.preprocessing import MinMaxScaler # normalization (MinMaxScaler)
Normalization is the process of rescaling data into a given range. One normalization method is MinMaxScaler, which rescales the data to the range 0 to 1; this balances the features and keeps the model from having to learn overly large parameters during training.
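Concretely, min-max scaling maps each value $x$ of a column to

$$x' = \frac{x - x_{\min}}{x_{\max} - x_{\min}}$$

so the column minimum becomes 0 and the maximum becomes 1.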
def min_max_normalize_dataframe(df, columns): # Normalize the specified dataframe columns
    # Create a MinMaxScaler object
    scaler = MinMaxScaler()
    # Fit the scaler on the specified columns
    scaler.fit(df[columns])
    # Apply min-max normalization to the specified columns
    df[columns] = scaler.transform(df[columns])
    return df, scaler
def denormalize_dataframe(df, columns, scaler): # Restore the dataframe to its original scale before normalization
    # Inverse transform the specified columns
    df[columns] = scaler.inverse_transform(df[columns])
    return df
def fill_missing_dates(df): # Fill in missing dates in the dataframe
    df['tanggal'] = pd.to_datetime(df['tanggal'], format='%d/%m/%Y', dayfirst=True) # Convert the 'tanggal' column to datetime, since it may not be in date format yet
    df = df.set_index('tanggal') # Use the dates as the index so the dataframe can be reindexed against a complete date range below
    # Generate a date range from the minimum to maximum date in the 'tanggal' column
    min_date = df.index.min() # Earliest date
    max_date = df.index.max() # Latest date
    date_range = pd.date_range(start=min_date, end=max_date) # Full range of dates from earliest to latest
    df = df.reindex(date_range) # Insert rows filled with NaN for every date in date_range that the dataframe does not contain
    df['netto_kg'] = df['netto_kg'].interpolate(method='linear') # Fill those NaN values by linear interpolation, i.e., along the straight line between the previous and next known values
    df = df.reset_index() # Turn the date index back into a column; the new column will be named 'index', not 'tanggal'
    df = df.rename(columns={'index': 'tanggal'}) # Rename the 'index' column back to 'tanggal'
    return df
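As a quick illustration of the linear interpolation step, here is a minimal sketch with made-up values:

```python
import pandas as pd

# A 2-day gap between the known values 10 and 40 is filled along a straight line
s = pd.Series([10.0, None, None, 40.0])
print(s.interpolate(method='linear').tolist())  # [10.0, 20.0, 30.0, 40.0]
```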
def check_missing_dates(df): # Check whether any dates are missing from the dataframe; used only once as a sanity check
    # Generate a date range from the minimum to maximum date in the 'tanggal' column
    min_date = df['tanggal'].min() # Earliest date
    max_date = df['tanggal'].max() # Latest date
    date_range = [min_date + timedelta(days=x) for x in range((max_date - min_date).days + 1)] # Full range of dates
    temp_tanggal = df['tanggal'].values
    # Find missing dates
    missing_dates = [date for date in date_range if date not in temp_tanggal]
    # Print missing dates
    if missing_dates:
        print("Missing dates:")
        for date in missing_dates:
            print(date.strftime('%d/%m/%Y'))
    else:
        print("No missing dates found.")
# Split the dataset into Magelang Utara, Tengah, and Selatan: the model we build needs each kecamatan in its own column, whereas right now they all share the single 'kecamatan' column
df_utara = df_aggregated[df_aggregated['kecamatan'] == 'Magelang Utara'].drop(columns='kecamatan').reset_index(drop=True) # Extract the dataframe for Magelang Utara
df_tengah = df_aggregated[df_aggregated['kecamatan'] == 'Magelang Tengah'].drop(columns='kecamatan').reset_index(drop=True) # Extract the dataframe for Magelang Tengah
df_selatan = df_aggregated[df_aggregated['kecamatan'] == 'Magelang Selatan'].drop(columns='kecamatan').reset_index(drop=True) # Extract the dataframe for Magelang Selatan
df_utara = fill_missing_dates(df_utara).rename(columns={'netto_kg': 'magelang_utara'}) # Fill missing dates for Magelang Utara
df_tengah = fill_missing_dates(df_tengah).rename(columns={'netto_kg': 'magelang_tengah'}) # Fill missing dates for Magelang Tengah
df_selatan = fill_missing_dates(df_selatan).rename(columns={'netto_kg': 'magelang_selatan'}) # Fill missing dates for Magelang Selatan
df_timeseries = pd.merge(df_utara, df_tengah, how='left', on='tanggal') # Merge the three dataframes back together, one column per kecamatan
df_timeseries = pd.merge(df_timeseries, df_selatan, how='left', on='tanggal')
df_timeseries, scaler = min_max_normalize_dataframe(df_timeseries, ['magelang_utara', 'magelang_tengah', 'magelang_selatan']) # Normalize the data with MinMaxScaler
df_timeseries.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1824 entries, 0 to 1823
Data columns (total 4 columns):
 #   Column            Non-Null Count  Dtype
---  ------            --------------  -----
 0   tanggal           1824 non-null   datetime64[ns]
 1   magelang_utara    1824 non-null   float64
 2   magelang_tengah   1824 non-null   float64
 3   magelang_selatan  1824 non-null   float64
dtypes: datetime64[ns](1), float64(3)
memory usage: 71.2 KB
df_timeseries.head() # Display the first 5 rows
|   | tanggal | magelang_utara | magelang_tengah | magelang_selatan |
|---|---|---|---|---|
| 0 | 2019-01-03 | 0.174009 | 0.001101 | 0.116399 |
| 1 | 2019-01-04 | 0.637558 | 0.231447 | 0.123363 |
| 2 | 2019-01-05 | 0.552547 | 0.194891 | 0.173437 |
| 3 | 2019-01-06 | 0.592419 | 0.181238 | 0.283203 |
| 4 | 2019-01-07 | 0.848929 | 0.664832 | 0.528934 |
df_timeseries.describe() # Display summary statistics
|   | magelang_utara | magelang_tengah | magelang_selatan |
|---|---|---|---|
| count | 1824.000000 | 1824.000000 | 1824.000000 |
| mean | 0.460543 | 0.451285 | 0.516479 |
| std | 0.180072 | 0.169291 | 0.184549 |
| min | 0.000000 | 0.000000 | 0.000000 |
| 25% | 0.363032 | 0.345959 | 0.406400 |
| 50% | 0.479326 | 0.460802 | 0.534240 |
| 75% | 0.584174 | 0.563312 | 0.647198 |
| max | 1.000000 | 1.000000 | 1.000000 |
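Since the values are now in the 0-1 range, keep the fitted `scaler` around; a minimal sketch of recovering values in kg later (e.g., for model predictions), using this notebook's own helper, would be:

```python
# Reuse the scaler returned by min_max_normalize_dataframe to undo the scaling
df_kg = denormalize_dataframe(
    df_timeseries.copy(),
    ['magelang_utara', 'magelang_tengah', 'magelang_selatan'],
    scaler,
)
```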
# Function that builds the dataset: it must consist of fixed-size input and output windows, e.g., the previous 5 days as input to predict the following 30 days as output
def make_dataset(df, columns, input_timesteps=10, output_timesteps=10):
    # Iterate through the DataFrame to create input-output pairs
    X = []
    y = []
    for i in range(len(df) - input_timesteps - output_timesteps + 1): # iterate over every valid window start
        X.append(df[columns].iloc[i:i+input_timesteps].values)
        y.append(df[columns].iloc[i+input_timesteps:i+input_timesteps+output_timesteps].values)
    # Convert the input-output pairs into numpy arrays
    X = np.array(X)
    y = np.array(y)
    print("Input shape:", X.shape)
    print("Output shape:", y.shape)
    return X, y
# Show the dataset sizes
X_30, y_30 = make_dataset(df_timeseries, ['magelang_utara', 'magelang_tengah', 'magelang_selatan'], input_timesteps=30, output_timesteps=366) # 30-day input, 366-day output
X_90, y_90 = make_dataset(df_timeseries, ['magelang_utara', 'magelang_tengah', 'magelang_selatan'], input_timesteps=90, output_timesteps=366) # 90-day input, 366-day output
X_366, y_366 = make_dataset(df_timeseries, ['magelang_utara', 'magelang_tengah', 'magelang_selatan'], input_timesteps=366, output_timesteps=366) # 366-day input, 366-day output
Input shape: (1429, 30, 3)
Output shape: (1429, 366, 3)
Input shape: (1369, 90, 3)
Output shape: (1369, 366, 3)
Input shape: (1093, 366, 3)
Output shape: (1093, 366, 3)
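The sample counts follow directly from the sliding window: each pair needs input_timesteps + output_timesteps consecutive days, so with 1824 rows there are 1824 - 30 - 366 + 1 = 1429 windows for the 30-day input, 1824 - 90 - 366 + 1 = 1369 for the 90-day input, and 1824 - 366 - 366 + 1 = 1093 for the 366-day input.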
The model comes in three variants: the first uses 30 input timesteps, the second 90, and the third 366.
This first model uses 30 days of waste data to predict the waste for the year 2024.
from keras.callbacks import ModelCheckpoint
# Build the model
model_LSTM = tf.keras.Sequential() # Sequential model: a container that stacks machine learning layers
model_LSTM.add(tf.keras.layers.LSTM(64, return_sequences=True, input_shape=(30, 3))) # First LSTM layer
model_LSTM.add(tf.keras.layers.LSTM(64, return_sequences=True)) # Second LSTM layer
model_LSTM.add(tf.keras.layers.LSTM(256, return_sequences=False)) # Third LSTM layer
model_LSTM.add(tf.keras.layers.Dense(1098, activation='relu')) # Dense output layer (1098 = 366 days x 3 kecamatan)
model_LSTM.add(tf.keras.layers.Reshape((366, 3))) # Reshape the output to (366, 3): 366 days and 3 kecamatan
model_LSTM.summary() # Show the model summary
Model: "sequential_20"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_40 (LSTM) (None, 30, 64) 17408
lstm_41 (LSTM) (None, 30, 64) 33024
lstm_42 (LSTM) (None, 256) 328704
dense_15 (Dense) (None, 1098) 282186
reshape_12 (Reshape) (None, 366, 3) 0
=================================================================
Total params: 661322 (2.52 MB)
Trainable params: 661322 (2.52 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
model_LSTM.compile(optimizer='adam', loss='mse') # Compile the model with the 'adam' optimizer and 'mse' (mean squared error) loss
Technically, an optimizer is the method that updates each parameter's weights using the loss and other known quantities.
Adam (Adaptive Moment Estimation) is one of the most widely used optimizers because it tends to converge to a solution easily.
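For reference, the 'mse' loss is the mean squared error over all predicted values:

$$\text{MSE} = \frac{1}{n}\sum_{i=1}^{n}\left(y_i - \hat{y}_i\right)^2$$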
checkpoint = ModelCheckpoint('best_LSTM_model.h5', monitor='val_loss', save_best_only=True, verbose=1) # The checkpoint saves the best model seen so far
After each epoch, the model is saved whenever it achieves a new lowest validation loss.
The data itself is split into 80% training data and 20% validation data via 'validation_split=0.2'.
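Note that Keras takes the validation samples from the end of the arrays before any shuffling, so here the validation set consists of the most recent 20% of windows.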
# Train the model
history = model_LSTM.fit(X_30, y_30, epochs=100, validation_split=0.2, callbacks=[checkpoint])
Epoch 1/100 36/36 [==============================] - ETA: 0s - loss: 0.1316 Epoch 1: val_loss improved from inf to 0.09574, saving model to best_LSTM_model.h5 36/36 [==============================] - 14s 215ms/step - loss: 0.1316 - val_loss: 0.0957 Epoch 2/100 36/36 [==============================] - ETA: 0s - loss: 0.1081 Epoch 2: val_loss improved from 0.09574 to 0.09466, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.1081 - val_loss: 0.0947 Epoch 3/100 36/36 [==============================] - ETA: 0s - loss: 0.1078 Epoch 3: val_loss improved from 0.09466 to 0.09441, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 177ms/step - loss: 0.1078 - val_loss: 0.0944 Epoch 4/100 36/36 [==============================] - ETA: 0s - loss: 0.1077 Epoch 4: val_loss did not improve from 0.09441 36/36 [==============================] - 5s 146ms/step - loss: 0.1077 - val_loss: 0.0949 Epoch 5/100 36/36 [==============================] - ETA: 0s - loss: 0.1077 Epoch 5: val_loss did not improve from 0.09441 36/36 [==============================] - 6s 178ms/step - loss: 0.1077 - val_loss: 0.0950 Epoch 6/100 36/36 [==============================] - ETA: 0s - loss: 0.1076 Epoch 6: val_loss improved from 0.09441 to 0.09434, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.1076 - val_loss: 0.0943 Epoch 7/100 36/36 [==============================] - ETA: 0s - loss: 0.1072 Epoch 7: val_loss improved from 0.09434 to 0.09416, saving model to best_LSTM_model.h5 36/36 [==============================] - 7s 187ms/step - loss: 0.1072 - val_loss: 0.0942 Epoch 8/100 36/36 [==============================] - ETA: 0s - loss: 0.1071 Epoch 8: val_loss improved from 0.09416 to 0.09414, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.1071 - val_loss: 0.0941 Epoch 9/100 36/36 [==============================] - ETA: 0s - loss: 0.1070 Epoch 9: val_loss did not improve from 0.09414 36/36 [==============================] - 6s 172ms/step - loss: 0.1070 - val_loss: 0.0942 Epoch 10/100 36/36 [==============================] - ETA: 0s - loss: 0.1069 Epoch 10: val_loss improved from 0.09414 to 0.09392, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 158ms/step - loss: 0.1069 - val_loss: 0.0939 Epoch 11/100 36/36 [==============================] - ETA: 0s - loss: 0.1067 Epoch 11: val_loss improved from 0.09392 to 0.09378, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 157ms/step - loss: 0.1067 - val_loss: 0.0938 Epoch 12/100 36/36 [==============================] - ETA: 0s - loss: 0.1063 Epoch 12: val_loss improved from 0.09378 to 0.09306, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 171ms/step - loss: 0.1063 - val_loss: 0.0931 Epoch 13/100 36/36 [==============================] - ETA: 0s - loss: 0.1060 Epoch 13: val_loss improved from 0.09306 to 0.09285, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.1060 - val_loss: 0.0928 Epoch 14/100 36/36 [==============================] - ETA: 0s - loss: 0.1058 Epoch 14: val_loss improved from 0.09285 to 0.09274, saving model to best_LSTM_model.h5 36/36 [==============================] - 7s 210ms/step - loss: 0.1058 - val_loss: 0.0927 Epoch 15/100 36/36 [==============================] - ETA: 0s - loss: 0.1049 Epoch 15: val_loss improved from 0.09274 to 0.09178, saving 
model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.1049 - val_loss: 0.0918 Epoch 16/100 36/36 [==============================] - ETA: 0s - loss: 0.1040 Epoch 16: val_loss improved from 0.09178 to 0.09054, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.1040 - val_loss: 0.0905 Epoch 17/100 36/36 [==============================] - ETA: 0s - loss: 0.1032 Epoch 17: val_loss did not improve from 0.09054 36/36 [==============================] - 5s 147ms/step - loss: 0.1032 - val_loss: 0.0907 Epoch 18/100 36/36 [==============================] - ETA: 0s - loss: 0.1026 Epoch 18: val_loss improved from 0.09054 to 0.08995, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.1026 - val_loss: 0.0900 Epoch 19/100 36/36 [==============================] - ETA: 0s - loss: 0.1022 Epoch 19: val_loss improved from 0.08995 to 0.08958, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.1022 - val_loss: 0.0896 Epoch 20/100 36/36 [==============================] - ETA: 0s - loss: 0.1019 Epoch 20: val_loss improved from 0.08958 to 0.08906, saving model to best_LSTM_model.h5 36/36 [==============================] - 8s 215ms/step - loss: 0.1019 - val_loss: 0.0891 Epoch 21/100 36/36 [==============================] - ETA: 0s - loss: 0.1011 Epoch 21: val_loss improved from 0.08906 to 0.08792, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.1011 - val_loss: 0.0879 Epoch 22/100 36/36 [==============================] - ETA: 0s - loss: 0.0990 Epoch 22: val_loss improved from 0.08792 to 0.08448, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 163ms/step - loss: 0.0990 - val_loss: 0.0845 Epoch 23/100 36/36 [==============================] - ETA: 0s - loss: 0.0956 Epoch 23: val_loss improved from 0.08448 to 0.08311, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 161ms/step - loss: 0.0956 - val_loss: 0.0831 Epoch 24/100 36/36 [==============================] - ETA: 0s - loss: 0.0937 Epoch 24: val_loss improved from 0.08311 to 0.08114, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 150ms/step - loss: 0.0937 - val_loss: 0.0811 Epoch 25/100 36/36 [==============================] - ETA: 0s - loss: 0.0918 Epoch 25: val_loss improved from 0.08114 to 0.07963, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0918 - val_loss: 0.0796 Epoch 26/100 36/36 [==============================] - ETA: 0s - loss: 0.0905 Epoch 26: val_loss improved from 0.07963 to 0.07834, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0905 - val_loss: 0.0783 Epoch 27/100 36/36 [==============================] - ETA: 0s - loss: 0.0889 Epoch 27: val_loss improved from 0.07834 to 0.07652, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0889 - val_loss: 0.0765 Epoch 28/100 36/36 [==============================] - ETA: 0s - loss: 0.0874 Epoch 28: val_loss did not improve from 0.07652 36/36 [==============================] - 5s 146ms/step - loss: 0.0874 - val_loss: 0.0766 Epoch 29/100 36/36 [==============================] - ETA: 0s - loss: 0.0867 Epoch 29: val_loss improved from 0.07652 to 0.07590, saving model to best_LSTM_model.h5 36/36 [==============================] - 7s 
188ms/step - loss: 0.0867 - val_loss: 0.0759 Epoch 30/100 36/36 [==============================] - ETA: 0s - loss: 0.0862 Epoch 30: val_loss improved from 0.07590 to 0.07481, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0862 - val_loss: 0.0748 Epoch 31/100 36/36 [==============================] - ETA: 0s - loss: 0.0852 Epoch 31: val_loss improved from 0.07481 to 0.07444, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 182ms/step - loss: 0.0852 - val_loss: 0.0744 Epoch 32/100 36/36 [==============================] - ETA: 0s - loss: 0.0844 Epoch 32: val_loss improved from 0.07444 to 0.07364, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0844 - val_loss: 0.0736 Epoch 33/100 36/36 [==============================] - ETA: 0s - loss: 0.0838 Epoch 33: val_loss improved from 0.07364 to 0.07354, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.0838 - val_loss: 0.0735 Epoch 34/100 36/36 [==============================] - ETA: 0s - loss: 0.0831 Epoch 34: val_loss improved from 0.07354 to 0.07350, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.0831 - val_loss: 0.0735 Epoch 35/100 36/36 [==============================] - ETA: 0s - loss: 0.0828 Epoch 35: val_loss improved from 0.07350 to 0.07253, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.0828 - val_loss: 0.0725 Epoch 36/100 36/36 [==============================] - ETA: 0s - loss: 0.0815 Epoch 36: val_loss improved from 0.07253 to 0.07150, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0815 - val_loss: 0.0715 Epoch 37/100 36/36 [==============================] - ETA: 0s - loss: 0.0795 Epoch 37: val_loss improved from 0.07150 to 0.06985, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 155ms/step - loss: 0.0795 - val_loss: 0.0699 Epoch 38/100 36/36 [==============================] - ETA: 0s - loss: 0.0785 Epoch 38: val_loss improved from 0.06985 to 0.06912, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 169ms/step - loss: 0.0785 - val_loss: 0.0691 Epoch 39/100 36/36 [==============================] - ETA: 0s - loss: 0.0773 Epoch 39: val_loss improved from 0.06912 to 0.06748, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0773 - val_loss: 0.0675 Epoch 40/100 36/36 [==============================] - ETA: 0s - loss: 0.0759 Epoch 40: val_loss improved from 0.06748 to 0.06679, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0759 - val_loss: 0.0668 Epoch 41/100 36/36 [==============================] - ETA: 0s - loss: 0.0748 Epoch 41: val_loss improved from 0.06679 to 0.06652, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0748 - val_loss: 0.0665 Epoch 42/100 36/36 [==============================] - ETA: 0s - loss: 0.0734 Epoch 42: val_loss improved from 0.06652 to 0.06393, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 178ms/step - loss: 0.0734 - val_loss: 0.0639 Epoch 43/100 36/36 [==============================] - ETA: 0s - loss: 0.0708 Epoch 43: val_loss improved from 0.06393 to 0.06151, saving model to best_LSTM_model.h5 36/36 [==============================] - 
5s 147ms/step - loss: 0.0708 - val_loss: 0.0615 Epoch 44/100 36/36 [==============================] - ETA: 0s - loss: 0.0686 Epoch 44: val_loss improved from 0.06151 to 0.06133, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 177ms/step - loss: 0.0686 - val_loss: 0.0613 Epoch 45/100 36/36 [==============================] - ETA: 0s - loss: 0.0675 Epoch 45: val_loss improved from 0.06133 to 0.05925, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0675 - val_loss: 0.0592 Epoch 46/100 36/36 [==============================] - ETA: 0s - loss: 0.0657 Epoch 46: val_loss improved from 0.05925 to 0.05721, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.0657 - val_loss: 0.0572 Epoch 47/100 36/36 [==============================] - ETA: 0s - loss: 0.0640 Epoch 47: val_loss did not improve from 0.05721 36/36 [==============================] - 5s 149ms/step - loss: 0.0640 - val_loss: 0.0573 Epoch 48/100 36/36 [==============================] - ETA: 0s - loss: 0.0625 Epoch 48: val_loss improved from 0.05721 to 0.05469, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 159ms/step - loss: 0.0625 - val_loss: 0.0547 Epoch 49/100 36/36 [==============================] - ETA: 0s - loss: 0.0605 Epoch 49: val_loss improved from 0.05469 to 0.05362, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 166ms/step - loss: 0.0605 - val_loss: 0.0536 Epoch 50/100 36/36 [==============================] - ETA: 0s - loss: 0.0593 Epoch 50: val_loss improved from 0.05362 to 0.05301, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.0593 - val_loss: 0.0530 Epoch 51/100 36/36 [==============================] - ETA: 0s - loss: 0.0588 Epoch 51: val_loss improved from 0.05301 to 0.05243, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0588 - val_loss: 0.0524 Epoch 52/100 36/36 [==============================] - ETA: 0s - loss: 0.0577 Epoch 52: val_loss improved from 0.05243 to 0.05204, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0577 - val_loss: 0.0520 Epoch 53/100 36/36 [==============================] - ETA: 0s - loss: 0.0565 Epoch 53: val_loss improved from 0.05204 to 0.05096, saving model to best_LSTM_model.h5 36/36 [==============================] - 8s 219ms/step - loss: 0.0565 - val_loss: 0.0510 Epoch 54/100 36/36 [==============================] - ETA: 0s - loss: 0.0540 Epoch 54: val_loss improved from 0.05096 to 0.04828, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 147ms/step - loss: 0.0540 - val_loss: 0.0483 Epoch 55/100 36/36 [==============================] - ETA: 0s - loss: 0.0517 Epoch 55: val_loss improved from 0.04828 to 0.04535, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.0517 - val_loss: 0.0454 Epoch 56/100 36/36 [==============================] - ETA: 0s - loss: 0.0490 Epoch 56: val_loss improved from 0.04535 to 0.04435, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 145ms/step - loss: 0.0490 - val_loss: 0.0443 Epoch 57/100 36/36 [==============================] - ETA: 0s - loss: 0.0474 Epoch 57: val_loss improved from 0.04435 to 0.04328, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 168ms/step - loss: 0.0474 - 
val_loss: 0.0433 Epoch 58/100 36/36 [==============================] - ETA: 0s - loss: 0.0461 Epoch 58: val_loss improved from 0.04328 to 0.04238, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 155ms/step - loss: 0.0461 - val_loss: 0.0424 Epoch 59/100 36/36 [==============================] - ETA: 0s - loss: 0.0448 Epoch 59: val_loss improved from 0.04238 to 0.04098, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 157ms/step - loss: 0.0448 - val_loss: 0.0410 Epoch 60/100 36/36 [==============================] - ETA: 0s - loss: 0.0435 Epoch 60: val_loss improved from 0.04098 to 0.04060, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 170ms/step - loss: 0.0435 - val_loss: 0.0406 Epoch 61/100 36/36 [==============================] - ETA: 0s - loss: 0.0422 Epoch 61: val_loss improved from 0.04060 to 0.03868, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0422 - val_loss: 0.0387 Epoch 62/100 36/36 [==============================] - ETA: 0s - loss: 0.0409 Epoch 62: val_loss improved from 0.03868 to 0.03844, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 180ms/step - loss: 0.0409 - val_loss: 0.0384 Epoch 63/100 36/36 [==============================] - ETA: 0s - loss: 0.0403 Epoch 63: val_loss improved from 0.03844 to 0.03779, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 146ms/step - loss: 0.0403 - val_loss: 0.0378 Epoch 64/100 36/36 [==============================] - ETA: 0s - loss: 0.0396 Epoch 64: val_loss improved from 0.03779 to 0.03669, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 178ms/step - loss: 0.0396 - val_loss: 0.0367 Epoch 65/100 36/36 [==============================] - ETA: 0s - loss: 0.0385 Epoch 65: val_loss improved from 0.03669 to 0.03662, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 148ms/step - loss: 0.0385 - val_loss: 0.0366 Epoch 66/100 36/36 [==============================] - ETA: 0s - loss: 0.0374 Epoch 66: val_loss improved from 0.03662 to 0.03543, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0374 - val_loss: 0.0354 Epoch 67/100 36/36 [==============================] - ETA: 0s - loss: 0.0364 Epoch 67: val_loss improved from 0.03543 to 0.03413, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0364 - val_loss: 0.0341 Epoch 68/100 36/36 [==============================] - ETA: 0s - loss: 0.0348 Epoch 68: val_loss improved from 0.03413 to 0.03387, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 181ms/step - loss: 0.0348 - val_loss: 0.0339 Epoch 69/100 36/36 [==============================] - ETA: 0s - loss: 0.0340 Epoch 69: val_loss improved from 0.03387 to 0.03366, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0340 - val_loss: 0.0337 Epoch 70/100 36/36 [==============================] - ETA: 0s - loss: 0.0335 Epoch 70: val_loss improved from 0.03366 to 0.03346, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 179ms/step - loss: 0.0335 - val_loss: 0.0335 Epoch 71/100 36/36 [==============================] - ETA: 0s - loss: 0.0335 Epoch 71: val_loss improved from 0.03346 to 0.03346, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 146ms/step - loss: 0.0335 
- val_loss: 0.0335 Epoch 72/100 36/36 [==============================] - ETA: 0s - loss: 0.0331 Epoch 72: val_loss did not improve from 0.03346 36/36 [==============================] - 5s 151ms/step - loss: 0.0331 - val_loss: 0.0336 Epoch 73/100 36/36 [==============================] - ETA: 0s - loss: 0.0321 Epoch 73: val_loss improved from 0.03346 to 0.03322, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 170ms/step - loss: 0.0321 - val_loss: 0.0332 Epoch 74/100 36/36 [==============================] - ETA: 0s - loss: 0.0315 Epoch 74: val_loss improved from 0.03322 to 0.03227, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0315 - val_loss: 0.0323 Epoch 75/100 36/36 [==============================] - ETA: 0s - loss: 0.0309 Epoch 75: val_loss improved from 0.03227 to 0.03182, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 178ms/step - loss: 0.0309 - val_loss: 0.0318 Epoch 76/100 36/36 [==============================] - ETA: 0s - loss: 0.0299 Epoch 76: val_loss improved from 0.03182 to 0.03178, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0299 - val_loss: 0.0318 Epoch 77/100 36/36 [==============================] - ETA: 0s - loss: 0.0296 Epoch 77: val_loss improved from 0.03178 to 0.03160, saving model to best_LSTM_model.h5 36/36 [==============================] - 6s 178ms/step - loss: 0.0296 - val_loss: 0.0316 Epoch 78/100 36/36 [==============================] - ETA: 0s - loss: 0.0293 Epoch 78: val_loss improved from 0.03160 to 0.03123, saving model to best_LSTM_model.h5 36/36 [==============================] - 5s 149ms/step - loss: 0.0293 - val_loss: 0.0312 Epoch 79/100 36/36 [==============================] - ETA: 0s - loss: 0.0288 Epoch 79: val_loss did not improve from 0.03123 36/36 [==============================] - 7s 184ms/step - loss: 0.0288 - val_loss: 0.0320 Epoch 80/100 36/36 [==============================] - ETA: 0s - loss: 0.0287 Epoch 80: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 146ms/step - loss: 0.0287 - val_loss: 0.0318 Epoch 81/100 36/36 [==============================] - ETA: 0s - loss: 0.0286 Epoch 81: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 173ms/step - loss: 0.0286 - val_loss: 0.0319 Epoch 82/100 36/36 [==============================] - ETA: 0s - loss: 0.0284 Epoch 82: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 147ms/step - loss: 0.0284 - val_loss: 0.0321 Epoch 83/100 36/36 [==============================] - ETA: 0s - loss: 0.0281 Epoch 83: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 157ms/step - loss: 0.0281 - val_loss: 0.0318 Epoch 84/100 36/36 [==============================] - ETA: 0s - loss: 0.0276 Epoch 84: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 168ms/step - loss: 0.0276 - val_loss: 0.0317 Epoch 85/100 36/36 [==============================] - ETA: 0s - loss: 0.0273 Epoch 85: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 148ms/step - loss: 0.0273 - val_loss: 0.0323 Epoch 86/100 36/36 [==============================] - ETA: 0s - loss: 0.0271 Epoch 86: val_loss did not improve from 0.03123 36/36 [==============================] - 8s 212ms/step - loss: 0.0271 - val_loss: 0.0321 Epoch 87/100 36/36 [==============================] - ETA: 0s - loss: 0.0268 Epoch 
87: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 146ms/step - loss: 0.0268 - val_loss: 0.0325 Epoch 88/100 36/36 [==============================] - ETA: 0s - loss: 0.0272 Epoch 88: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 178ms/step - loss: 0.0272 - val_loss: 0.0325 Epoch 89/100 36/36 [==============================] - ETA: 0s - loss: 0.0268 Epoch 89: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 144ms/step - loss: 0.0268 - val_loss: 0.0322 Epoch 90/100 36/36 [==============================] - ETA: 0s - loss: 0.0264 Epoch 90: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 177ms/step - loss: 0.0264 - val_loss: 0.0323 Epoch 91/100 36/36 [==============================] - ETA: 0s - loss: 0.0261 Epoch 91: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 146ms/step - loss: 0.0261 - val_loss: 0.0335 Epoch 92/100 36/36 [==============================] - ETA: 0s - loss: 0.0260 Epoch 92: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 160ms/step - loss: 0.0260 - val_loss: 0.0331 Epoch 93/100 36/36 [==============================] - ETA: 0s - loss: 0.0255 Epoch 93: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 162ms/step - loss: 0.0255 - val_loss: 0.0327 Epoch 94/100 36/36 [==============================] - ETA: 0s - loss: 0.0253 Epoch 94: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 145ms/step - loss: 0.0253 - val_loss: 0.0330 Epoch 95/100 36/36 [==============================] - ETA: 0s - loss: 0.0254 Epoch 95: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 178ms/step - loss: 0.0254 - val_loss: 0.0333 Epoch 96/100 36/36 [==============================] - ETA: 0s - loss: 0.0249 Epoch 96: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 147ms/step - loss: 0.0249 - val_loss: 0.0329 Epoch 97/100 36/36 [==============================] - ETA: 0s - loss: 0.0246 Epoch 97: val_loss did not improve from 0.03123 36/36 [==============================] - 6s 180ms/step - loss: 0.0246 - val_loss: 0.0332 Epoch 98/100 36/36 [==============================] - ETA: 0s - loss: 0.0245 Epoch 98: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 148ms/step - loss: 0.0245 - val_loss: 0.0336 Epoch 99/100 36/36 [==============================] - ETA: 0s - loss: 0.0242 Epoch 99: val_loss did not improve from 0.03123 36/36 [==============================] - 7s 187ms/step - loss: 0.0242 - val_loss: 0.0333 Epoch 100/100 36/36 [==============================] - ETA: 0s - loss: 0.0239 Epoch 100: val_loss did not improve from 0.03123 36/36 [==============================] - 5s 147ms/step - loss: 0.0239 - val_loss: 0.0331
import matplotlib.pyplot as plt # Package for data visualization
# Visualize the training history
# Get the training loss from the history
loss = history.history['loss']
# Get the validation loss from the history
val_loss = history.history['val_loss']
# Get the number of epochs
epochs = range(1, len(loss) + 1)
# Plot training loss in green
plt.plot(epochs, loss, 'g', label='Training loss')
# Find the epoch with the best validation loss
best_epoch = val_loss.index(min(val_loss)) + 1
# Plot validation loss in orange
plt.plot(epochs, val_loss, 'orange', label='Validation loss')
# Mark the epoch with the best validation loss
plt.plot(best_epoch, min(val_loss), 'ro', label='Best Validation Loss')
plt.title('LSTM Model 30 Days')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
with open("history.pkl", 'wb') as f: # Menyimpan history model
pickle.dump(history, f)
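Loading it back later is then straightforward (a minimal sketch, assuming the dict was pickled as above):

```python
with open("history.pkl", "rb") as f:
    history_dict = pickle.load(f)  # {'loss': [...], 'val_loss': [...]}
```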
Same as before, except this time 90 days of data are used to predict the waste for 2024.
from keras.callbacks import ModelCheckpoint
model_LSTM_90 = tf.keras.Sequential()
model_LSTM_90.add(tf.keras.layers.LSTM(64, return_sequences=True, input_shape=(90, 3)))
model_LSTM_90.add(tf.keras.layers.LSTM(64, return_sequences=True))
model_LSTM_90.add(tf.keras.layers.LSTM(256, return_sequences=False))
model_LSTM_90.add(tf.keras.layers.Dense(1098, activation='relu'))
model_LSTM_90.add(tf.keras.layers.Reshape((366, 3)))
model_LSTM_90.summary()
Model: "sequential_21"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_43 (LSTM) (None, 90, 64) 17408
lstm_44 (LSTM) (None, 90, 64) 33024
lstm_45 (LSTM) (None, 256) 328704
dense_16 (Dense) (None, 1098) 282186
reshape_13 (Reshape) (None, 366, 3) 0
=================================================================
Total params: 661322 (2.52 MB)
Trainable params: 661322 (2.52 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
model_LSTM_90.compile(optimizer='adam', loss='mse')
checkpoint_90 = ModelCheckpoint('best_LSTM_90_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
history_90 = model_LSTM_90.fit(X_90, y_90, epochs=100, validation_split=0.2, callbacks=[checkpoint_90])
Epoch 1/100 35/35 [==============================] - ETA: 0s - loss: 0.1313 Epoch 1: val_loss improved from inf to 0.09376, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 22s 464ms/step - loss: 0.1313 - val_loss: 0.0938 Epoch 2/100
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
35/35 [==============================] - ETA: 0s - loss: 0.1061 Epoch 2: val_loss improved from 0.09376 to 0.09177, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 457ms/step - loss: 0.1061 - val_loss: 0.0918 Epoch 3/100 35/35 [==============================] - ETA: 0s - loss: 0.1055 Epoch 3: val_loss did not improve from 0.09177 35/35 [==============================] - 16s 454ms/step - loss: 0.1055 - val_loss: 0.0921 Epoch 4/100 35/35 [==============================] - ETA: 0s - loss: 0.1055 Epoch 4: val_loss improved from 0.09177 to 0.09164, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 424ms/step - loss: 0.1055 - val_loss: 0.0916 Epoch 5/100 35/35 [==============================] - ETA: 0s - loss: 0.1053 Epoch 5: val_loss did not improve from 0.09164 35/35 [==============================] - 15s 429ms/step - loss: 0.1053 - val_loss: 0.0918 Epoch 6/100 35/35 [==============================] - ETA: 0s - loss: 0.1053 Epoch 6: val_loss did not improve from 0.09164 35/35 [==============================] - 15s 430ms/step - loss: 0.1053 - val_loss: 0.0919 Epoch 7/100 35/35 [==============================] - ETA: 0s - loss: 0.1050 Epoch 7: val_loss did not improve from 0.09164 35/35 [==============================] - 15s 426ms/step - loss: 0.1050 - val_loss: 0.0917 Epoch 8/100 35/35 [==============================] - ETA: 0s - loss: 0.1047 Epoch 8: val_loss improved from 0.09164 to 0.09135, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 17s 476ms/step - loss: 0.1047 - val_loss: 0.0914 Epoch 9/100 35/35 [==============================] - ETA: 0s - loss: 0.1042 Epoch 9: val_loss improved from 0.09135 to 0.09035, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 424ms/step - loss: 0.1042 - val_loss: 0.0903 Epoch 10/100 35/35 [==============================] - ETA: 0s - loss: 0.1038 Epoch 10: val_loss improved from 0.09035 to 0.08989, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 437ms/step - loss: 0.1038 - val_loss: 0.0899 Epoch 11/100 35/35 [==============================] - ETA: 0s - loss: 0.1030 Epoch 11: val_loss improved from 0.08989 to 0.08916, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 426ms/step - loss: 0.1030 - val_loss: 0.0892 Epoch 12/100 35/35 [==============================] - ETA: 0s - loss: 0.1029 Epoch 12: val_loss did not improve from 0.08916 35/35 [==============================] - 15s 423ms/step - loss: 0.1029 - val_loss: 0.0895 Epoch 13/100 35/35 [==============================] - ETA: 0s - loss: 0.1027 Epoch 13: val_loss did not improve from 0.08916 35/35 [==============================] - 16s 459ms/step - loss: 0.1027 - val_loss: 0.0892 Epoch 14/100 35/35 [==============================] - ETA: 0s - loss: 0.1023 Epoch 14: val_loss improved from 0.08916 to 0.08843, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 456ms/step - loss: 0.1023 - val_loss: 0.0884 Epoch 15/100 35/35 [==============================] - ETA: 0s - loss: 0.1019 Epoch 15: val_loss improved from 0.08843 to 0.08760, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 435ms/step - loss: 0.1019 - val_loss: 0.0876 Epoch 16/100 35/35 [==============================] - ETA: 0s - loss: 0.1009 Epoch 16: val_loss did not improve from 0.08760 35/35 [==============================] - 15s 424ms/step - loss: 0.1009 - val_loss: 0.0880 Epoch 17/100 
35/35 [==============================] - ETA: 0s - loss: 0.1006 Epoch 17: val_loss improved from 0.08760 to 0.08698, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.1006 - val_loss: 0.0870 Epoch 18/100 35/35 [==============================] - ETA: 0s - loss: 0.0995 Epoch 18: val_loss did not improve from 0.08698 35/35 [==============================] - 16s 452ms/step - loss: 0.0995 - val_loss: 0.0872 Epoch 19/100 35/35 [==============================] - ETA: 0s - loss: 0.0989 Epoch 19: val_loss improved from 0.08698 to 0.08594, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 430ms/step - loss: 0.0989 - val_loss: 0.0859 Epoch 20/100 35/35 [==============================] - ETA: 0s - loss: 0.0983 Epoch 20: val_loss did not improve from 0.08594 35/35 [==============================] - 15s 425ms/step - loss: 0.0983 - val_loss: 0.0882 Epoch 21/100 35/35 [==============================] - ETA: 0s - loss: 0.0964 Epoch 21: val_loss improved from 0.08594 to 0.08432, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 432ms/step - loss: 0.0964 - val_loss: 0.0843 Epoch 22/100 35/35 [==============================] - ETA: 0s - loss: 0.0944 Epoch 22: val_loss improved from 0.08432 to 0.08331, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0944 - val_loss: 0.0833 Epoch 23/100 35/35 [==============================] - ETA: 0s - loss: 0.0921 Epoch 23: val_loss improved from 0.08331 to 0.07835, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 457ms/step - loss: 0.0921 - val_loss: 0.0783 Epoch 24/100 35/35 [==============================] - ETA: 0s - loss: 0.0890 Epoch 24: val_loss improved from 0.07835 to 0.07690, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0890 - val_loss: 0.0769 Epoch 25/100 35/35 [==============================] - ETA: 0s - loss: 0.0858 Epoch 25: val_loss improved from 0.07690 to 0.07363, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0858 - val_loss: 0.0736 Epoch 26/100 35/35 [==============================] - ETA: 0s - loss: 0.0845 Epoch 26: val_loss improved from 0.07363 to 0.07264, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 425ms/step - loss: 0.0845 - val_loss: 0.0726 Epoch 27/100 35/35 [==============================] - ETA: 0s - loss: 0.0834 Epoch 27: val_loss improved from 0.07264 to 0.07164, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 470ms/step - loss: 0.0834 - val_loss: 0.0716 Epoch 28/100 35/35 [==============================] - ETA: 0s - loss: 0.0826 Epoch 28: val_loss improved from 0.07164 to 0.07027, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 444ms/step - loss: 0.0826 - val_loss: 0.0703 Epoch 29/100 35/35 [==============================] - ETA: 0s - loss: 0.0819 Epoch 29: val_loss did not improve from 0.07027 35/35 [==============================] - 15s 428ms/step - loss: 0.0819 - val_loss: 0.0723 Epoch 30/100 35/35 [==============================] - ETA: 0s - loss: 0.0816 Epoch 30: val_loss improved from 0.07027 to 0.06996, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 428ms/step - loss: 0.0816 - val_loss: 0.0700 Epoch 31/100 35/35 [==============================] - ETA: 0s - loss: 0.0812 Epoch 
31: val_loss did not improve from 0.06996 35/35 [==============================] - 15s 426ms/step - loss: 0.0812 - val_loss: 0.0716 Epoch 32/100 35/35 [==============================] - ETA: 0s - loss: 0.0809 Epoch 32: val_loss improved from 0.06996 to 0.06990, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0809 - val_loss: 0.0699 Epoch 33/100 35/35 [==============================] - ETA: 0s - loss: 0.0807 Epoch 33: val_loss did not improve from 0.06990 35/35 [==============================] - 16s 458ms/step - loss: 0.0807 - val_loss: 0.0708 Epoch 34/100 35/35 [==============================] - ETA: 0s - loss: 0.0803 Epoch 34: val_loss improved from 0.06990 to 0.06892, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 426ms/step - loss: 0.0803 - val_loss: 0.0689 Epoch 35/100 35/35 [==============================] - ETA: 0s - loss: 0.0794 Epoch 35: val_loss did not improve from 0.06892 35/35 [==============================] - 15s 426ms/step - loss: 0.0794 - val_loss: 0.0693 Epoch 36/100 35/35 [==============================] - ETA: 0s - loss: 0.0790 Epoch 36: val_loss improved from 0.06892 to 0.06820, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 424ms/step - loss: 0.0790 - val_loss: 0.0682 Epoch 37/100 35/35 [==============================] - ETA: 0s - loss: 0.0782 Epoch 37: val_loss did not improve from 0.06820 35/35 [==============================] - 15s 424ms/step - loss: 0.0782 - val_loss: 0.0682 Epoch 38/100 35/35 [==============================] - ETA: 0s - loss: 0.0777 Epoch 38: val_loss improved from 0.06820 to 0.06672, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 473ms/step - loss: 0.0777 - val_loss: 0.0667 Epoch 39/100 35/35 [==============================] - ETA: 0s - loss: 0.0764 Epoch 39: val_loss improved from 0.06672 to 0.06540, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0764 - val_loss: 0.0654 Epoch 40/100 35/35 [==============================] - ETA: 0s - loss: 0.0748 Epoch 40: val_loss improved from 0.06540 to 0.06445, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 458ms/step - loss: 0.0748 - val_loss: 0.0645 Epoch 41/100 35/35 [==============================] - ETA: 0s - loss: 0.0730 Epoch 41: val_loss improved from 0.06445 to 0.06284, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 425ms/step - loss: 0.0730 - val_loss: 0.0628 Epoch 42/100 35/35 [==============================] - ETA: 0s - loss: 0.0720 Epoch 42: val_loss improved from 0.06284 to 0.06253, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 424ms/step - loss: 0.0720 - val_loss: 0.0625 Epoch 43/100 35/35 [==============================] - ETA: 0s - loss: 0.0709 Epoch 43: val_loss improved from 0.06253 to 0.06114, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 434ms/step - loss: 0.0709 - val_loss: 0.0611 Epoch 44/100 35/35 [==============================] - ETA: 0s - loss: 0.0693 Epoch 44: val_loss improved from 0.06114 to 0.05921, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 444ms/step - loss: 0.0693 - val_loss: 0.0592 Epoch 45/100 35/35 [==============================] - ETA: 0s - loss: 0.0675 Epoch 45: val_loss improved from 0.05921 to 0.05770, saving model to best_LSTM_90_model.h5 35/35 
[==============================] - 15s 426ms/step - loss: 0.0675 - val_loss: 0.0577 Epoch 46/100 35/35 [==============================] - ETA: 0s - loss: 0.0664 Epoch 46: val_loss improved from 0.05770 to 0.05753, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0664 - val_loss: 0.0575 Epoch 47/100 35/35 [==============================] - ETA: 0s - loss: 0.0646 Epoch 47: val_loss improved from 0.05753 to 0.05562, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0646 - val_loss: 0.0556 Epoch 48/100 35/35 [==============================] - ETA: 0s - loss: 0.0628 Epoch 48: val_loss improved from 0.05562 to 0.05508, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 424ms/step - loss: 0.0628 - val_loss: 0.0551 Epoch 49/100 35/35 [==============================] - ETA: 0s - loss: 0.0612 Epoch 49: val_loss improved from 0.05508 to 0.05237, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 459ms/step - loss: 0.0612 - val_loss: 0.0524 Epoch 50/100 35/35 [==============================] - ETA: 0s - loss: 0.0591 Epoch 50: val_loss improved from 0.05237 to 0.05140, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 430ms/step - loss: 0.0591 - val_loss: 0.0514 Epoch 51/100 35/35 [==============================] - ETA: 0s - loss: 0.0579 Epoch 51: val_loss improved from 0.05140 to 0.05084, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 425ms/step - loss: 0.0579 - val_loss: 0.0508 Epoch 52/100 35/35 [==============================] - ETA: 0s - loss: 0.0565 Epoch 52: val_loss improved from 0.05084 to 0.04909, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 428ms/step - loss: 0.0565 - val_loss: 0.0491 Epoch 53/100 35/35 [==============================] - ETA: 0s - loss: 0.0542 Epoch 53: val_loss improved from 0.04909 to 0.04833, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 455ms/step - loss: 0.0542 - val_loss: 0.0483 Epoch 54/100 35/35 [==============================] - ETA: 0s - loss: 0.0521 Epoch 54: val_loss improved from 0.04833 to 0.04566, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 16s 456ms/step - loss: 0.0521 - val_loss: 0.0457 Epoch 55/100 35/35 [==============================] - ETA: 0s - loss: 0.0507 Epoch 55: val_loss improved from 0.04566 to 0.04438, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 427ms/step - loss: 0.0507 - val_loss: 0.0444 Epoch 56/100 35/35 [==============================] - ETA: 0s - loss: 0.0488 Epoch 56: val_loss improved from 0.04438 to 0.04342, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 425ms/step - loss: 0.0488 - val_loss: 0.0434 Epoch 57/100 35/35 [==============================] - ETA: 0s - loss: 0.0470 Epoch 57: val_loss improved from 0.04342 to 0.04226, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 426ms/step - loss: 0.0470 - val_loss: 0.0423 Epoch 58/100 35/35 [==============================] - ETA: 0s - loss: 0.0452 Epoch 58: val_loss improved from 0.04226 to 0.04001, saving model to best_LSTM_90_model.h5 35/35 [==============================] - 15s 429ms/step - loss: 0.0452 - val_loss: 0.0400 Epoch 59/100 35/35 [==============================] - ETA: 0s - loss: 0.0445 Epoch 59: val_loss improved from 
0.04001 to 0.03955, saving model to best_LSTM_90_model.h5
35/35 [==============================] - 15s 439ms/step - loss: 0.0445 - val_loss: 0.0396
...
Epoch 98/100
Epoch 98: val_loss improved from 0.03034 to 0.03002, saving model to best_LSTM_90_model.h5
35/35 [==============================] - 15s 444ms/step - loss: 0.0297 - val_loss: 0.0300
Epoch 99/100
Epoch 99: val_loss did not improve from 0.03002
35/35 [==============================] - 15s 439ms/step - loss: 0.0295 - val_loss: 0.0303
Epoch 100/100
Epoch 100: val_loss did not improve from 0.03002
35/35 [==============================] - 17s 476ms/step - loss: 0.0297 - val_loss: 0.0302
(per-epoch log trimmed; val_loss improved from 0.0396 at epoch 59 to a best of 0.03002 at epoch 98)
# Get the training loss from the history
loss = history_90.history['loss']
# Get the validation loss from the history
val_loss = history_90.history['val_loss']
# Get the number of epochs
epochs = range(1, len(loss) + 1)
# Plot training loss in green
plt.plot(epochs, loss, 'g', label='Training loss')
# Find the epoch with the best validation loss
best_epoch = val_loss.index(min(val_loss)) + 1
# Plot validation loss in orange
plt.plot(epochs, val_loss, 'orange', label='Validation loss')
# Mark the epoch with the best validation loss
plt.plot(best_epoch, min(val_loss), 'ro', label='Best Validation Loss')
plt.title('LSTM Model 90 Days')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
with open("history_90.pkl", 'wb') as f:
pickle.dump(history_90, f)
The last experiment uses a 366-day window to predict the waste volume for 2024.
model_LSTM_366 = tf.keras.Sequential()
model_LSTM_366.add(tf.keras.layers.LSTM(64, return_sequences=True, input_shape=(366, 3)))  # input: 366 days x 3 districts
model_LSTM_366.add(tf.keras.layers.LSTM(64, return_sequences=True))
model_LSTM_366.add(tf.keras.layers.LSTM(256, return_sequences=False))  # collapse the sequence into a single vector
model_LSTM_366.add(tf.keras.layers.Dense(1098, activation='relu'))  # 1098 = 366 days x 3 districts
model_LSTM_366.add(tf.keras.layers.Reshape((366, 3)))  # reshape the flat output back to (days, districts)
model_LSTM_366.summary()
Model: "sequential_22"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_46 (LSTM) (None, 366, 64) 17408
lstm_47 (LSTM) (None, 366, 64) 33024
lstm_48 (LSTM) (None, 256) 328704
dense_17 (Dense) (None, 1098) 282186
reshape_14 (Reshape) (None, 366, 3) 0
=================================================================
Total params: 661322 (2.52 MB)
Trainable params: 661322 (2.52 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
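As a sanity check on the summary above, the LSTM parameter counts can be reproduced by hand: a layer with u units and d input features has 4(du + u² + u) weights, since each of the four gates carries input weights, recurrent weights, and a bias. A minimal sketch:
# Reproduce the parameter counts reported by model.summary() above
def lstm_params(d, u):
    return 4 * (d * u + u * u + u)

print(lstm_params(3, 64))    # 17408  (lstm_46)
print(lstm_params(64, 64))   # 33024  (lstm_47)
print(lstm_params(64, 256))  # 328704 (lstm_48)
print(256 * 1098 + 1098)     # 282186 (dense_17: weights + biases)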
model_LSTM_366.compile(optimizer='adam', loss='mse')
checkpoint_366 = ModelCheckpoint('best_LSTM_366_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
history_366 = model_LSTM_366.fit(X_366, y_366, epochs=100, validation_split=0.2, callbacks=[checkpoint_366])
Epoch 1/100
28/28 [==============================] - ETA: 0s - loss: 0.1334
Epoch 1: val_loss improved from inf to 0.09401, saving model to best_LSTM_366_model.h5
28/28 [==============================] - 57s 2s/step - loss: 0.1334 - val_loss: 0.0940
Epoch 2/100
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
28/28 [==============================] - ETA: 0s - loss: 0.1058
Epoch 2: val_loss improved from 0.09401 to 0.09239, saving model to best_LSTM_366_model.h5
28/28 [==============================] - 49s 2s/step - loss: 0.1058 - val_loss: 0.0924
...
Epoch 99/100
Epoch 99: val_loss did not improve from 0.03265
28/28 [==============================] - 46s 2s/step - loss: 0.0329 - val_loss: 0.0329
Epoch 100/100
Epoch 100: val_loss improved from 0.03265 to 0.03227, saving model to best_LSTM_366_model.h5
28/28 [==============================] - 48s 2s/step - loss: 0.0324 - val_loss: 0.0323
(per-epoch log trimmed; val_loss fell from 0.0940 at epoch 1 to a best of 0.03227 at epoch 100)
# Get the training loss from the history
loss = history_366.history['loss']
# Get the validation loss from the history
val_loss = history_366.history['val_loss']
# Get the number of epochs
epochs = range(1, len(loss) + 1)
# Plot training loss in green
plt.plot(epochs, loss, 'g', label='Training loss')
# Find the epoch with the best validation loss
best_epoch = val_loss.index(min(val_loss)) + 1
# Plot validation loss in orange
plt.plot(epochs, val_loss, 'orange', label='Validation loss')
# Mark the epoch with the best validation loss
plt.plot(best_epoch, min(val_loss), 'ro', label='Best Validation Loss')
plt.title('LSTM Model 366 Days')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
with open("history_366.pkl", 'wb') as f:
pickle.dump(history_366, f)
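Since the conclusion below compares the three runs, here is a minimal sketch of reloading the saved histories and reporting each run's best validation loss. It assumes the 30-day experiment saved history_30.pkl the same way earlier in the notebook, and it tolerates either a pickled History object or its .history dict:
import pickle

# Reload each saved history and report its best validation loss
for name in ["history_30", "history_90", "history_366"]:
    with open(f"{name}.pkl", "rb") as f:
        hist = pickle.load(f)
    val_loss = hist["val_loss"] if isinstance(hist, dict) else hist.history["val_loss"]
    best = min(val_loss)
    print(f"{name}: best val_loss = {best:.5f} (epoch {val_loss.index(best) + 1})")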
After the three experiments, the best one turned out to be the 30-day experiment (the first), marked by the smallest validation loss, 0.03123.
The final step is to predict the 2024 waste volume with the model obtained above.
X_pred = np.array([df_timeseries[['magelang_utara', 'magelang_tengah', 'magelang_selatan']].iloc[-30:].values]) # Build the model input: the last 30 days of observations
y_pred = model_LSTM.predict(X_pred) # Predict with the best model, the 30-day model
print(y_pred.shape)
print(y_pred[:5])
1/1 [==============================] - 4s 4s/step
(1, 366, 3)
[[[0.5976804  0.4909475  0.7239353 ]
  [0.55852544 0.43864074 0.693532  ]
  [0.5564108  0.45952934 0.65807575]
  ...
  [0.52366143 0.40401322 0.51513886]
  [0.55716836 0.43663803 0.5384773 ]
  [0.57083505 0.3644834  0.47720507]]]
y_pred_denorm = scaler.inverse_transform(y_pred[0]) # Undo the normalization on the predictions (the scaler must have been fit on these three columns in this order)
df_pred = pd.DataFrame(y_pred_denorm, columns=['magelang_utara', 'magelang_tengah', 'magelang_selatan']) # Put the predictions into a dataframe
df_pred.head()
| | magelang_utara | magelang_tengah | magelang_selatan |
|---|---|---|---|
| 0 | 24533.755859 | 11401.963867 | 22240.269531 |
| 1 | 22942.888672 | 10214.337891 | 21323.457031 |
| 2 | 22856.970703 | 10688.613281 | 20254.273438 |
| 3 | 20227.412109 | 10738.855469 | 18646.232422 |
| 4 | 18266.132812 | 9440.942383 | 17265.660156 |
# Return the values to their original scale, before normalization
df_timeseries_denorm = denormalize_dataframe(df_timeseries, ['magelang_utara', 'magelang_tengah', 'magelang_selatan'], scaler) # Denormalize the original dataframe (not the predictions)
df_timeseries_denorm.head()
| | tanggal | magelang_utara | magelang_tengah | magelang_selatan |
|---|---|---|---|---|
| 0 | 2019-01-03 | 7320.0 | 280.0 | 3920.0 |
| 1 | 2019-01-04 | 26154.0 | 5510.0 | 4130.0 |
| 2 | 2019-01-05 | 22700.0 | 4680.0 | 5640.0 |
| 3 | 2019-01-06 | 24320.0 | 4370.0 | 8950.0 |
| 4 | 2019-01-07 | 34742.0 | 15350.0 | 16360.0 |
# Concatenate the original data with the predictions
extended_df = pd.concat([df_timeseries_denorm, df_pred], axis=0, ignore_index=True) # Append the predicted rows below the original data
# Extend the dates by 366 days (into 2024), starting one day *after* the last observed date;
# pd.date_range(start=last_date, ...) would include last_date itself and duplicate it
last_date = df_timeseries_denorm['tanggal'].iloc[-1]
extended_dates = pd.date_range(start=last_date + pd.Timedelta(days=1), periods=366)
# Add the extended dates to the DataFrame; ignore_index keeps both sides on the same 0..n-1 index so the assignment aligns
extended_df['tanggal'] = pd.concat([df_timeseries_denorm['tanggal'], pd.Series(extended_dates)], ignore_index=True)
temp_type = np.array([0]*len(extended_df)) # Add a 'type' column: 0 marks actual data, 1 marks predictions
temp_type[-366:] = 1
extended_df['type'] = temp_type
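A quick optional check, assuming fill_missing_dates left the original series gap-free, confirms the join introduced no duplicated or skipped days:
# The combined date column should be unique and strictly daily
assert extended_df['tanggal'].is_unique
assert (extended_df['tanggal'].diff().dropna() == pd.Timedelta(days=1)).all()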
extended_df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2190 entries, 0 to 2189
Data columns (total 5 columns):
 #   Column            Non-Null Count  Dtype
---  ------            --------------  -----
 0   tanggal           2190 non-null   datetime64[ns]
 1   magelang_utara    2190 non-null   float64
 2   magelang_tengah   2190 non-null   float64
 3   magelang_selatan  2190 non-null   float64
 4   type              2190 non-null   int64
dtypes: datetime64[ns](1), float64(3), int64(1)
memory usage: 85.7 KB
extended_df.to_csv('extended_df.csv', index=False) # Save the data as a .csv file
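One caveat with the CSV round trip: read_csv loads 'tanggal' back as plain strings, so parse it explicitly when reloading. A minimal sketch:
# Restore the datetime dtype when reading the file back
reloaded = pd.read_csv('extended_df.csv', parse_dates=['tanggal'])
print(reloaded.dtypes)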
import matplotlib.pyplot as plt # Visualize the combined data
# extended_df holds both the observations and the 366 predicted rows,
# with columns 'tanggal', 'magelang_utara', 'magelang_tengah', 'magelang_selatan', 'type'
# Set the 'tanggal' column as the index
date_index_df = extended_df.set_index('tanggal')
# Plot the values
plt.figure(figsize=(10, 6))
plt.plot(date_index_df.index, date_index_df['magelang_utara'], label='Magelang Utara')
plt.plot(date_index_df.index, date_index_df['magelang_tengah'], label='Magelang Tengah')
plt.plot(date_index_df.index, date_index_df['magelang_selatan'], label='Magelang Selatan')
plt.xlabel('Date')
plt.ylabel('netto_kg')
plt.title('Original and Predicted Values Over Time')
plt.legend()
plt.grid(True)
plt.show()
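The plot above does not visually separate the forecast from the history. A small variant (a sketch, using the 'type' flag added earlier) draws the predicted segment with dashed lines:
# Split on the 'type' flag: 0 = observed, 1 = predicted
actual = date_index_df[date_index_df['type'] == 0]
pred = date_index_df[date_index_df['type'] == 1]

plt.figure(figsize=(10, 6))
for col in ['magelang_utara', 'magelang_tengah', 'magelang_selatan']:
    plt.plot(actual.index, actual[col], label=f'{col} (actual)')
    plt.plot(pred.index, pred[col], '--', label=f'{col} (predicted)')
plt.xlabel('Date')
plt.ylabel('netto_kg')
plt.title('Observed vs Predicted Waste per District')
plt.legend()
plt.grid(True)
plt.show()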