Commit 02ea2986 authored by Mattia Mancini

First commit
%% Cell type:code id:277d841b tags:
``` python
import numpy as np
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf

# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

import h5py
import os.path

base_dir = '/project/lofarsw/Data/Dynspec/'
fpath = os.path.join(base_dir, 'dset.h5')
lpath = os.path.join(base_dir, 'labels.csv')

# Map each timestamp to its class label (skip the CSV header and empty lines).
with open(lpath, 'r') as f_in:
    ts_to_klass = {x[0]: int(x[1])
                   for x in map(lambda x: x.split(','),
                                filter(lambda x: x, f_in.read().split('\n')[1:]))}

with h5py.File(fpath, 'r') as fin:
    in_dataset = fin['/data'][:]
    in_timerange = fin['/time_range'][:]
    in_freqrange = fin['/freq_range'][:]
    in_ids = fin['timestamps'][:]

in_label_names = {1: 'Type I storm', 2: 'Type II', 3: 'Type III', 4: 'Type IV', 5: 'Atypical', 6: 'Nothing'}

# Keep only the dynamic spectra without NaNs.
g = np.where(~np.isnan(in_dataset.sum(axis=(1, 2))))
dataset = in_dataset[g]
timerange = in_timerange[g]
freqrange = in_freqrange[g]
ids = in_ids[g]
#avg = dataset[:, :, 404]

# Class per (filtered) sample; -1 marks samples without a human label.
# Note: built from the filtered `ids` so it stays aligned with `dataset`.
klasses = np.array([ts_to_klass.get(ts.decode(), -1) for ts in ids], dtype=int)

# Add a trailing channel axis so the spectra can feed a Conv2D-based network.
shape = dataset.shape
dataset = dataset.reshape((*shape, 1))
print(dataset.shape)
```
%% Output
2023-04-12 14:45:57.443373: I tensorflow/core/platform/cpu_feature_guard.cc:194] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE3 SSE4.1 SSE4.2 AVX
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-04-12 14:45:57.611485: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
/usr/local/lib/python3.8/dist-packages/pandas/core/computation/expressions.py:20: UserWarning: Pandas requires version '2.7.3' or newer of 'numexpr' (version '2.7.1' currently installed).
from pandas.core.computation.check import NUMEXPR_INSTALLED
2023-04-12 14:45:59.939213: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:45:59.980067: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:45:59.980790: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
(7857, 500, 800, 1)
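%% Cell type:markdown tags:

A quick sanity check of the loaded data can help before training: display one labelled dynamic spectrum together with its stored time/frequency extent and class name. This is a minimal sketch, not part of the original commit; it only assumes the variables defined in the cell above (`dataset`, `ids`, `klasses`, `timerange`, `freqrange`, `in_label_names`).

%% Cell type:code tags:

``` python
# Sketch: show the first labelled sample and print its metadata.
idx = int(np.where(klasses != -1)[0][0])      # first sample with a human label
plt.figure(figsize=(8, 4))
plt.imshow(dataset[idx, :, :, 0], aspect='auto', origin='lower')
plt.title(f"{ids[idx].decode()} - {in_label_names[klasses[idx]]}")
plt.colorbar()
plt.show()
print('time range:', timerange[idx])
print('freq range:', freqrange[idx])
```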
%% Cell type:code id:da00f1d5 tags:
``` python
# Indices of the samples that have a human label.
with_label = np.where(klasses != -1)[0]
class_names = [1, 2, 3, 4, 5, 6]
num_classes = len(class_names)

def to_logits(labels, num_classes):
    # One-hot encode the integer labels.
    logits = np.zeros((len(labels), num_classes))
    for i, r in enumerate(labels):
        logits[i, r] = 1
    return logits

shuffled_dset = dataset[with_label, :, :, :]
shuffled_ids = ids[with_label]
shuffled_klasses = klasses[with_label] - 1   # shift labels to 0..5
logits_klasses = to_logits(shuffled_klasses, num_classes)

img_height, img_width, _ = dataset[0].shape
batch_size = 32
training_size = 2500
test_size = 100
validation_size = len(with_label) - training_size - test_size

# Shuffle once (reshuffle_each_iteration=False) so the take/skip splits below
# stay disjoint across epochs instead of being reshuffled every iteration.
tfdataset = tf.data.Dataset.from_tensor_slices((shuffled_dset, logits_klasses)).shuffle(
    len(with_label), seed=42, reshuffle_each_iteration=False)
training_dset = tfdataset.take(training_size).batch(batch_size)
validation_dset = tfdataset.skip(training_size).take(validation_size).batch(batch_size)
test_dset = tfdataset.skip(training_size).skip(validation_size).take(test_size).batch(batch_size)
```
%% Output
2023-04-12 14:46:14.907179: I tensorflow/core/platform/cpu_feature_guard.cc:194] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE3 SSE4.1 SSE4.2 AVX
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-04-12 14:46:14.911721: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:14.912555: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:14.913131: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:15.007508: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:15.008240: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:15.008822: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-04-12 14:46:15.009023: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1621] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14412 MB memory: -> device: 0, name: Tesla V100-PCIE-32GB, pci bus id: 0000:00:06.0, compute capability: 7.0
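%% Cell type:markdown tags:

Before training, it is useful to know how the labelled samples are distributed over the six classes: a strong imbalance would explain a classifier that gets stuck near the majority-class frequency. A short check (sketch, not in the original commit) using the variables defined above:

%% Cell type:code tags:

``` python
# Count how many labelled samples fall into each of the six classes.
counts = np.bincount(shuffled_klasses, minlength=num_classes)
for k, n in enumerate(counts):
    print(f"{in_label_names[k + 1]:<15} {n:5d}  ({n / len(shuffled_klasses):.1%})")
```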
%% Cell type:code id:aa0f3be0 tags:
``` python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

model = Sequential([
    tf.keras.layers.InputLayer(input_shape=(img_height, img_width, 1)),
    # Data augmentation: random shifts, then a random 40x150 crop that the
    # ResNet backbone is trained on.
    layers.RandomTranslation(height_factor=(-.2, .2), width_factor=(-.2, .2)),
    layers.RandomCrop(height=40, width=150),
    tf.keras.applications.resnet_v2.ResNet50V2(
        include_top=True,
        weights=None,
        input_shape=(40, 150, 1),
        pooling=None,
        classes=6,
        # Linear (logit) output: the loss below applies the softmax itself
        # via from_logits=True.
        classifier_activation=None
    )
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
```
%% Output
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformIntV2 cause there is no registered converter for this op.
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
random_translation_1 (Rando (None, 500, 800, 1) 0
mTranslation)
random_crop_1 (RandomCrop) (None, 40, 150, 1) 0
resnet50v2 (Functional) (None, 6) 23570822
=================================================================
Total params: 23,570,822
Trainable params: 23,525,382
Non-trainable params: 45,440
_________________________________________________________________
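%% Cell type:markdown tags:

The augmentation front-end reduces each 500x800 spectrum to a single random 40x150 crop before it reaches the ResNet, so the backbone only ever sees a small patch of each image. To inspect what it actually trains on, the two augmentation layers can be replayed on one sample (sketch, not in the original commit; the layers are re-created here with the same parameters as in the model above).

%% Cell type:code tags:

``` python
# Sketch: apply the same augmentation as the model front-end to one sample.
aug = tf.keras.Sequential([
    layers.RandomTranslation(height_factor=(-.2, .2), width_factor=(-.2, .2)),
    layers.RandomCrop(height=40, width=150),
])
sample = shuffled_dset[:1]                       # shape (1, 500, 800, 1)
cropped = aug(sample, training=True).numpy()     # training=True activates the randomness

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].imshow(sample[0, :, :, 0], aspect='auto', origin='lower')
axes[0].set_title('input (500 x 800)')
axes[1].imshow(cropped[0, :, :, 0], aspect='auto', origin='lower')
axes[1].set_title('after RandomTranslation + RandomCrop (40 x 150)')
plt.show()
```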
%% Cell type:code id:e3225424 tags:
``` python
epochs=200
history = model.fit(
training_dset,
validation_data=validation_dset,
epochs=epochs
)
```
%% Output
Epoch 1/200
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformIntV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformV2 cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting RngReadAndSkip cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting Bitcast cause there is no registered converter for this op.
WARNING:tensorflow:Using a while_loop for converting StatelessRandomUniformIntV2 cause there is no registered converter for this op.
2023-04-12 14:50:03.457149: W tensorflow/core/grappler/optimizers/loop_optimizer.cc:907] Skipping loop optimization for Merge node with control input: sequential_1/random_crop_1/loop_body/cond/pfor/cond/branch_executed/_527
79/79 [==============================] - 45s 204ms/step - loss: 1.4692 - accuracy: 0.5160 - val_loss: 1.3556 - val_accuracy: 0.5630
Epoch 2/200
79/79 [==============================] - 15s 193ms/step - loss: 1.3913 - accuracy: 0.5312 - val_loss: 1.4141 - val_accuracy: 0.5197
Epoch 3/200
79/79 [==============================] - 15s 192ms/step - loss: 1.4021 - accuracy: 0.5256 - val_loss: 1.7683 - val_accuracy: 0.2047
Epoch 4/200
79/79 [==============================] - 15s 193ms/step - loss: 1.4181 - accuracy: 0.5268 - val_loss: 1.5771 - val_accuracy: 0.4173
Epoch 5/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3899 - accuracy: 0.5292 - val_loss: 1.3639 - val_accuracy: 0.5394
Epoch 6/200
79/79 [==============================] - 15s 192ms/step - loss: 1.4041 - accuracy: 0.5280 - val_loss: 1.7560 - val_accuracy: 0.2126
Epoch 7/200
79/79 [==============================] - 15s 194ms/step - loss: 1.4212 - accuracy: 0.5012 - val_loss: 1.6034 - val_accuracy: 0.3780
Epoch 8/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3979 - accuracy: 0.5232 - val_loss: 1.3061 - val_accuracy: 0.5906
Epoch 9/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3850 - accuracy: 0.5288 - val_loss: 1.2938 - val_accuracy: 0.5906
Epoch 10/200
79/79 [==============================] - 15s 193ms/step - loss: 1.3933 - accuracy: 0.5228 - val_loss: 1.5326 - val_accuracy: 0.4567
Epoch 11/200
79/79 [==============================] - 15s 192ms/step - loss: 1.4277 - accuracy: 0.5164 - val_loss: 1.7918 - val_accuracy: 0.1969
Epoch 12/200
79/79 [==============================] - 15s 194ms/step - loss: 1.4187 - accuracy: 0.5280 - val_loss: 1.7918 - val_accuracy: 0.1575
Epoch 13/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3864 - accuracy: 0.5304 - val_loss: 1.3320 - val_accuracy: 0.6024
Epoch 14/200
79/79 [==============================] - 15s 193ms/step - loss: 1.4006 - accuracy: 0.5220 - val_loss: 1.7215 - val_accuracy: 0.2520
Epoch 15/200
79/79 [==============================] - 15s 194ms/step - loss: 1.3964 - accuracy: 0.5216 - val_loss: 1.5706 - val_accuracy: 0.4055
Epoch 16/200
79/79 [==============================] - 16s 196ms/step - loss: 1.3942 - accuracy: 0.5264 - val_loss: 1.7317 - val_accuracy: 0.2244
Epoch 17/200
79/79 [==============================] - 15s 193ms/step - loss: 1.3973 - accuracy: 0.5256 - val_loss: 1.3525 - val_accuracy: 0.5669
Epoch 18/200
79/79 [==============================] - 12s 157ms/step - loss: 1.4012 - accuracy: 0.5136 - val_loss: 1.7676 - val_accuracy: 0.2126
Epoch 19/200
79/79 [==============================] - 12s 156ms/step - loss: 1.3992 - accuracy: 0.5256 - val_loss: 1.4661 - val_accuracy: 0.4921
Epoch 20/200
79/79 [==============================] - 12s 157ms/step - loss: 1.4108 - accuracy: 0.5188 - val_loss: 1.3133 - val_accuracy: 0.5709
Epoch 21/200
79/79 [==============================] - 14s 179ms/step - loss: 1.4016 - accuracy: 0.5212 - val_loss: 1.3943 - val_accuracy: 0.5197
Epoch 22/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3981 - accuracy: 0.5232 - val_loss: 1.3928 - val_accuracy: 0.5512
Epoch 23/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3912 - accuracy: 0.5308 - val_loss: 1.4124 - val_accuracy: 0.5315
Epoch 24/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3964 - accuracy: 0.5176 - val_loss: 1.3383 - val_accuracy: 0.5591
Epoch 25/200
79/79 [==============================] - 15s 191ms/step - loss: 1.3829 - accuracy: 0.5332 - val_loss: 1.3985 - val_accuracy: 0.5315
Epoch 26/200
79/79 [==============================] - 15s 191ms/step - loss: 1.4624 - accuracy: 0.5124 - val_loss: 1.5770 - val_accuracy: 0.4134
Epoch 27/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4359 - accuracy: 0.4980 - val_loss: 1.6139 - val_accuracy: 0.3858
Epoch 28/200
79/79 [==============================] - 15s 191ms/step - loss: 1.3948 - accuracy: 0.5296 - val_loss: 1.7895 - val_accuracy: 0.2283
Epoch 29/200
79/79 [==============================] - 15s 191ms/step - loss: 1.4227 - accuracy: 0.5156 - val_loss: 1.6671 - val_accuracy: 0.3583
Epoch 30/200
79/79 [==============================] - 15s 192ms/step - loss: 1.4064 - accuracy: 0.5340 - val_loss: 1.7951 - val_accuracy: 0.5354
Epoch 31/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3993 - accuracy: 0.5268 - val_loss: 1.3242 - val_accuracy: 0.5827
Epoch 32/200
79/79 [==============================] - 15s 189ms/step - loss: 1.4091 - accuracy: 0.5240 - val_loss: 1.4127 - val_accuracy: 0.5079
Epoch 33/200
79/79 [==============================] - 14s 170ms/step - loss: 1.4095 - accuracy: 0.5268 - val_loss: 1.7918 - val_accuracy: 0.1732
Epoch 34/200
79/79 [==============================] - 15s 189ms/step - loss: 1.4207 - accuracy: 0.5300 - val_loss: 1.7699 - val_accuracy: 0.1299
Epoch 35/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4023 - accuracy: 0.5176 - val_loss: 1.6538 - val_accuracy: 0.2717
Epoch 36/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4641 - accuracy: 0.5056 - val_loss: 1.4979 - val_accuracy: 0.5000
Epoch 37/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4453 - accuracy: 0.5348 - val_loss: 1.3891 - val_accuracy: 0.5551
Epoch 38/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4198 - accuracy: 0.5312 - val_loss: 1.3272 - val_accuracy: 0.5591
Epoch 39/200
79/79 [==============================] - 15s 188ms/step - loss: 1.4148 - accuracy: 0.5376 - val_loss: 1.4845 - val_accuracy: 0.5276
Epoch 40/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3911 - accuracy: 0.5416 - val_loss: 1.6488 - val_accuracy: 0.3898
Epoch 41/200
79/79 [==============================] - 15s 191ms/step - loss: 1.4155 - accuracy: 0.5204 - val_loss: 1.3346 - val_accuracy: 0.5433
Epoch 42/200
79/79 [==============================] - 15s 189ms/step - loss: 1.4017 - accuracy: 0.5212 - val_loss: 1.3755 - val_accuracy: 0.5315
Epoch 43/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3982 - accuracy: 0.5264 - val_loss: 1.4069 - val_accuracy: 0.4961
Epoch 44/200
79/79 [==============================] - 15s 191ms/step - loss: 1.3734 - accuracy: 0.5392 - val_loss: 1.3838 - val_accuracy: 0.5276
Epoch 45/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3945 - accuracy: 0.5324 - val_loss: 1.5031 - val_accuracy: 0.4488
Epoch 46/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3970 - accuracy: 0.5296 - val_loss: 1.3349 - val_accuracy: 0.5748
Epoch 47/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3914 - accuracy: 0.5240 - val_loss: 1.4520 - val_accuracy: 0.5000
Epoch 48/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3858 - accuracy: 0.5300 - val_loss: 1.5360 - val_accuracy: 0.4094
Epoch 49/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3876 - accuracy: 0.5240 - val_loss: 1.4506 - val_accuracy: 0.4961
Epoch 50/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3866 - accuracy: 0.5316 - val_loss: 1.4856 - val_accuracy: 0.4803
Epoch 51/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3881 - accuracy: 0.5316 - val_loss: 1.4590 - val_accuracy: 0.5039
Epoch 52/200
79/79 [==============================] - 15s 187ms/step - loss: 1.3926 - accuracy: 0.5292 - val_loss: 1.4648 - val_accuracy: 0.5118
Epoch 53/200
79/79 [==============================] - 15s 187ms/step - loss: 1.3791 - accuracy: 0.5360 - val_loss: 1.4039 - val_accuracy: 0.5197
Epoch 54/200
79/79 [==============================] - 15s 187ms/step - loss: 1.4000 - accuracy: 0.5276 - val_loss: 1.2531 - val_accuracy: 0.5906
Epoch 55/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3777 - accuracy: 0.5392 - val_loss: 1.2780 - val_accuracy: 0.5709
Epoch 56/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3821 - accuracy: 0.5320 - val_loss: 1.3780 - val_accuracy: 0.5157
Epoch 57/200
79/79 [==============================] - 15s 188ms/step - loss: 1.4003 - accuracy: 0.5276 - val_loss: 1.4809 - val_accuracy: 0.4803
Epoch 58/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3873 - accuracy: 0.5252 - val_loss: 1.7747 - val_accuracy: 0.1890
Epoch 59/200
79/79 [==============================] - 15s 187ms/step - loss: 1.3802 - accuracy: 0.5376 - val_loss: 1.6755 - val_accuracy: 0.3307
Epoch 60/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3931 - accuracy: 0.5240 - val_loss: 1.3273 - val_accuracy: 0.5787
Epoch 61/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3793 - accuracy: 0.5324 - val_loss: 1.3590 - val_accuracy: 0.5512
Epoch 62/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3866 - accuracy: 0.5268 - val_loss: 1.3875 - val_accuracy: 0.5354
Epoch 63/200
79/79 [==============================] - 15s 191ms/step - loss: 1.4101 - accuracy: 0.5180 - val_loss: 1.3784 - val_accuracy: 0.5354
Epoch 64/200
79/79 [==============================] - 15s 190ms/step - loss: 1.4072 - accuracy: 0.5204 - val_loss: 1.3794 - val_accuracy: 0.5394
Epoch 65/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3871 - accuracy: 0.5296 - val_loss: 1.3762 - val_accuracy: 0.5472
Epoch 66/200
79/79 [==============================] - 15s 188ms/step - loss: 1.3802 - accuracy: 0.5312 - val_loss: 1.3611 - val_accuracy: 0.5315
Epoch 67/200
79/79 [==============================] - 15s 190ms/step - loss: 1.3855 - accuracy: 0.5284 - val_loss: 1.3567 - val_accuracy: 0.5709
Epoch 68/200
79/79 [==============================] - 15s 186ms/step - loss: 1.3996 - accuracy: 0.5208 - val_loss: 1.2827 - val_accuracy: 0.5709
Epoch 69/200
79/79 [==============================] - 15s 189ms/step - loss: 1.3824 - accuracy: 0.5276 - val_loss: 1.3300 - val_accuracy: 0.5669
Epoch 70/200
79/79 [==============================] - 15s 192ms/step - loss: 1.3905 - accuracy: 0.5324 - val_loss: 1.3069 - val_accuracy: 0.5591
Epoch 71/200
79/79 [==============================] - 15s 191ms/step - loss: 1.3792 - accuracy: 0.5360 - val_loss: 1.4586 - val_accuracy: 0.4921
Epoch 72/200
79/79 [==============================] - 15s 193ms/step - loss: 1.3905 - accuracy: 0.5288 - val_loss: 1.3358 - val_accuracy: 0.5669
Epoch 73/200
79/79 [==============================] - 14s 173ms/step - loss: 1.3917 - accuracy: 0.5252 - val_loss: 1.3838 - val_accuracy: 0.5354
Epoch 74/200
79/79 [==============================] - 12s 157ms/step - loss: 1.3974 - accuracy: 0.5264 - val_loss: 1.3751 - val_accuracy: 0.5354
Epoch 75/200
43/79 [===============>..............] - ETA: 5s - loss: 1.3721 - accuracy: 0.5385
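%% Cell type:markdown tags:

Over the 75 epochs that ran, validation accuracy swings between roughly 0.13 and 0.60 with no clear trend. One option, not used in the original run, is to add Keras callbacks so the fit stops once the validation loss stops improving and the best weights are kept; the patience value and checkpoint filename below are arbitrary illustration choices.

%% Cell type:code tags:

``` python
# Sketch: same fit call as above, with early stopping and checkpointing added.
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20,
                                     restore_best_weights=True),
    tf.keras.callbacks.ModelCheckpoint('best_model.h5',   # hypothetical path
                                       monitor='val_accuracy',
                                       save_best_only=True),
]
history = model.fit(
    training_dset,
    validation_data=validation_dset,
    epochs=epochs,
    callbacks=callbacks
)
```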
%% Cell type:code id:deb51d3f tags:
``` python
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
# Use the number of completed epochs (the run above was interrupted),
# not the requested `epochs`, so the x axis matches the recorded history.
epochs_range = range(len(acc))

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
```
%% Cell type:code id:9d231f71 tags:
``` python
import seaborn as sns

def predict_labels(dset):
    # Predicted class index per sample (softmax over the logits, then argmax).
    predictions = model.predict(dset)
    results = [tf.nn.softmax(row) for row in predictions]
    results = [np.argmax(row) for row in results]
    return results

def predict_class_label_number(dset):
    # Unused here: predict_top_k is not a method of a plain Keras model.
    label_names = in_label_names
    return [label_names[o[0][0] + 1] for o in model.predict_top_k(dset, batch_size=128)]

def show_confusion_matrix(cm, labels):
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
                annot=True, fmt='g')
    plt.xlabel('Prediction')
    plt.ylabel('Label')
    plt.show()

# Confusion matrix on the training split.
true_classes = []
for img, labels in training_dset:
    true_classes += [np.argmax(row) for row in labels]
predicted_classes = predict_labels(training_dset)
print(true_classes, predicted_classes)
conf_matrix = tf.math.confusion_matrix(true_classes, predicted_classes, num_classes=num_classes)
show_confusion_matrix(conf_matrix, [in_label_names[k + 1] for k in range(num_classes)])
```
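%% Cell type:markdown tags:

The confusion matrix above is computed on the training split; the 100-sample test split created earlier is never used. A minimal evaluation on held-out data (sketch, not in the original commit) could reuse the helpers defined above:

%% Cell type:code tags:

``` python
# Sketch: loss/accuracy and confusion matrix on the held-out test split.
test_loss, test_acc = model.evaluate(test_dset)
print(f"test loss: {test_loss:.4f}  test accuracy: {test_acc:.4f}")

test_true = []
for _, labels in test_dset:
    test_true += [np.argmax(row) for row in labels]
test_pred = predict_labels(test_dset)
cm = tf.math.confusion_matrix(test_true, test_pred, num_classes=num_classes)
show_confusion_matrix(cm, [in_label_names[k + 1] for k in range(num_classes)])
```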
%% Cell type:code id:046d05fc tags:
``` python
```