Commit 6ea71393 authored by Samuel Gorta's avatar Samuel Gorta
Browse files

initial notebook

parent a45d73b8
Loading
Loading
Loading
Loading

data/RD_shuffled.npy

0 → 100644
+80.1 MiB

File added.

No diff preview for this file type.

optim.ipynb

0 → 100644
+5 −0
Original line number Diff line number Diff line
%% Cell type:code id: tags:

``` python

```

requirements.txt

0 → 100644
+4 −0
Original line number Diff line number Diff line
numpy==1.22.3
tensorflow==2.5.2
matplotlib==3.5.1
keras==2.7.0
 No newline at end of file

train_ae.ipynb

0 → 100644
+98 −0
Original line number Diff line number Diff line
%% Cell type:code id: tags:

``` python
# Train a dense autoencoder on pre-shuffled signal curves and (optionally)
# inspect the statistics of its 14-unit bottleneck representation.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.models import Model

# Inversion-time (TI) sampling grid: 105 values total
# (14 values from 50..375 step 25, 60 values from 400..990 step 10,
# plus 31 hand-picked values from 1000 to 3000).
ti_seq = [*list(range(50, 400, 25)),
          *list(range(400, 1000, 10)),
          1000, 1030, 1050, 1080, 1100, 1130, 1150, 1180, 1200, 1230, 1250, 1280, 1300, 1330, 1350, 1380,
          1400, 1450, 1500, 1550, 1600, 1650, 1700,
          1800, 1900, 2000, 2100, 2200, 2300, 2500, 3000]

# Load real shuffled data.
# Assumes each row holds one curve with 105 samples (one per TI) so that it
# matches the 105-unit model input below — TODO confirm via the shape print.
data = np.load("data/RD_shuffled.npy")

print(data.shape)
# Each augmented row = the curve's samples followed by the 105 TI values
# (i.e. 210 columns per row).
# NOTE(review): train_in / val_in / test_in built from this array are never
# used below — the autoencoder is fit on train_out/val_out only. Presumably
# left over from (or prepared for) a TI-aware experiment; verify intent.
data_with_tis = np.array([list(x) + ti_seq for x in data], dtype=float)

# Split: first 85% train, remainder minus the last 50 rows validation,
# last 50 rows held out for the qualitative test plots at the bottom.
train_in = data_with_tis[:int(len(data_with_tis) * 0.85)]
train_out = data[:int(len(data) * 0.85)]

val_in = data_with_tis[int(len(data_with_tis) * 0.85):-50]
val_out = data[int(len(data) * 0.85):-50]

test_in = data_with_tis[-50:]
test_out = data[-50:]


print(len(data_with_tis), len(train_in), len(val_in), len(test_in))
print(train_in.shape)

# Toggle: True trains a fresh model and saves it; False reloads the saved
# model and analyses the bottleneck activations instead.
new_train = True

if new_train:
    # Symmetric dense autoencoder: 105 -> 14 (bottleneck "bn") -> 105.
    # The commented-out layers are previously-tried wider variants.
    model = tf.keras.models.Sequential()
    # encoder
    model.add(tf.keras.layers.Dense(units=105, activation='relu', input_dim=105, name="in"))
    # model.add(tf.keras.layers.Dense(units=64, activation='relu'))
    # model.add(tf.keras.layers.Dense(units=32, activation='relu'))
    model.add(tf.keras.layers.Dense(units=14, activation='relu', name="bn"))
    # decoder
    # model.add(tf.keras.layers.Dense(units=32, activation='relu'))
    # model.add(tf.keras.layers.Dense(units=64, activation='relu'))
    # Linear output so reconstructions are unbounded, matching raw signal values.
    model.add(tf.keras.layers.Dense(units=105, activation='linear', name="out"))

    mse = tf.keras.losses.MeanSquaredError(reduction="auto", name="mean_squared_error")
    # NOTE(review): 'accuracy' is not a meaningful metric for an MSE
    # regression autoencoder; kept only because the history plots below
    # read history['accuracy'] / history['val_accuracy'].
    model.compile(optimizer='adam', loss=mse, metrics=["accuracy"])

    # Autoencoder training: input == target (train_out reconstructs itself).
    m_hist = model.fit(train_out, train_out, validation_data=(val_out, val_out), batch_size=128, epochs=30)
    # Saves in TF SavedModel format (a directory).
    model.save("tf_models_BN14-W3")

    plt.plot(m_hist.history['accuracy'])
    plt.plot(m_hist.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    plt.plot(m_hist.history['loss'])
    plt.plot(m_hist.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

else:
    # Reload the previously saved autoencoder instead of retraining.
    model = tf.keras.models.load_model("tf_models_BN14-W3",
                                       custom_objects=None,
                                       compile=True)
    # train on another epochs
    # m_hist = model.fit(train, train, validation_data=(val, val), batch_size=32, epochs=30)
    # tf.keras.models.save_model(model, "tf_models_BT8-85-1")
    # plt.plot(m_hist.history['accuracy'])
    # plt.plot(m_hist.history['val_accuracy'])

    # get encoder -> the 14 params from bottle neck:
    # a sub-model from the autoencoder's input to the 'bn' layer output,
    # i.e. the learned 14-dim encoding of each curve.
    enc_model = Model(inputs=model.input,
                      outputs=model.get_layer('bn').output)
    enc_output = enc_model.predict(data)
    print("Cov")
    # Mean of each bottleneck dimension across the whole dataset.
    print(np.average(enc_output, axis=0))
    # Covariance of the 14 bottleneck dimensions (rows = variables, hence .T);
    # its eigenvalues indicate how many dimensions carry real variance.
    cov_m = np.cov(enc_output.T)
    # print(cov_m)
    eig = np.linalg.eigvals(cov_m)
    print(eig)

# Qualitative check: overlay each held-out curve (red) with its
# reconstruction (blue) against the TI grid, one figure per curve.
pred = model.predict(test_out)
for i in range(len(test_out)):
    plt.plot(ti_seq, test_out[i], color='red', marker="o", label="data")
    plt.plot(ti_seq, pred[i], color='blue', marker="o", label="pred")
    plt.ylim(0, 2000)
    plt.legend()
    plt.show()
```