diff --git a/Untitled.ipynb b/Untitled.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..4a8e0040caa5a23a7c07dd1a72f709676127dfb3
--- /dev/null
+++ b/Untitled.ipynb
@@ -0,0 +1,246 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9cd4d4bf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install matplotlib\n",
+    "import numpy as np\n",
+    "from tqdm import tqdm_notebook\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "import tensorflow as tf\n",
+    "gpus = tf.config.experimental.list_physical_devices('GPU')\n",
+    "for gpu in gpus:\n",
+    "  tf.config.experimental.set_memory_growth(gpu, True)\n",
+    "\n",
+    "  \n",
+    "import h5py\n",
+    "\n",
+    "dataset=None\n",
+    "timerange=None\n",
+    "freqrange=None\n",
+    "ids=None\n",
+    "fpath = '/project/lofarsw/Data/Dynspec/dset.h5'\n",
+    "\n",
+    "with h5py.File(fpath, 'r') as fin:\n",
+    "  in_dataset=fin['/data'][:]\n",
+    "  in_timerange=fin['/time_range'][:]\n",
+    "  in_freqrange=fin['/freq_range'][:]\n",
+    "  in_ids=fin['timestamps'][:]\n",
+    "  \n",
+    "\n",
+    "g = np.where(~np.isnan(in_dataset.sum(axis=(1,2))))\n",
+    "\n",
+    "dataset = in_dataset[g]\n",
+    "timerange= in_timerange[g]\n",
+    "freqrange= in_freqrange[g]\n",
+    "ids = in_ids[g]\n",
+    "#avg = dataset[:, :, 404]\n",
+    "\n",
+    "\n",
+    "\n",
+    "import time\n",
+    "from IPython import display\n",
+    "\n",
+    "import os\n",
+    "class ConvVarAutoencoder(tf.keras.Model):\n",
+    "  def __init__(self, latent_dim, imsize):\n",
+    "    super().__init__(self)\n",
+    "    self.latent_dim = latent_dim\n",
+    "    shape = imsize\n",
+    "    self.encoder = tf.keras.Sequential(\n",
+    "      [\n",
+    "        tf.keras.layers.InputLayer(input_shape=imsize),\n",
+    "        tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=(2, 2), activation='relu'),\n",
+    "        #tf.keras.layers.MaxPool2D(pool_size=(3,3), strides=(2, 2)),\n",
+    "        tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=(2, 2), activation='relu'),\n",
+    "        tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=(2, 2), activation='relu'),\n",
+    "        \n",
+    "        tf.keras.layers.Flatten(),\n",
+    "        # No activation\n",
+    "        tf.keras.layers.Dense(latent_dim + latent_dim),\n",
+    "      ]\n",
+    "    )\n",
+    "    rescaled = (shape[0] // 4, shape[1] // 4)\n",
+    "    self.decoder = tf.keras.Sequential(\n",
+    "        [\n",
+    "            tf.keras.layers.InputLayer(input_shape=(latent_dim,)),\n",
+    "            tf.keras.layers.Dense(units=rescaled[0]*rescaled[1]*32, activation=tf.nn.relu),\n",
+    "            tf.keras.layers.Reshape(target_shape=(rescaled[0], rescaled[1], 32)),\n",
+    "            tf.keras.layers.Conv2DTranspose(\n",
+    "                filters=32,  kernel_size=(3,3), strides=2, padding='same',\n",
+    "                activation='relu'),\n",
+    "            tf.keras.layers.Conv2DTranspose(\n",
+    "                filters=32,  kernel_size=(3,3), strides=2, padding='same',\n",
+    "                activation='relu'),\n",
+    "            # No activation\n",
+    "            #tf.keras.layers.UpSampling2D(size=(3, 3), interpolation='nearest'),\n",
+    "            tf.keras.layers.Conv2DTranspose(\n",
+    "                filters=1, kernel_size=(3,3), strides=1, padding='same'),\n",
+    "        ]\n",
+    "    )\n",
+    "  @tf.function\n",
+    "  def sample(self, eps=None):\n",
+    "    if eps is None:\n",
+    "      eps = tf.random.normal(shape=(100, self.latent_dim))\n",
+    "    return self.decode(eps, apply_sigmoid=True)\n",
+    "\n",
+    "  def encode(self, x):\n",
+    "    mean, logvar = tf.split(self.encoder(x), num_or_size_splits=2, axis=1)\n",
+    "    return mean, logvar\n",
+    "\n",
+    "  def reparameterize(self, mean, logvar):\n",
+    "    eps = tf.random.normal(shape=mean.shape)\n",
+    "    return eps * tf.exp(logvar * .5) + mean\n",
+    "\n",
+    "  def decode(self, z, apply_sigmoid=False):\n",
+    "    logits = self.decoder(z)\n",
+    "    if apply_sigmoid:\n",
+    "      probs = tf.sigmoid(logits)\n",
+    "      return probs\n",
+    "    return logits\n",
+    "  \n",
+    "optimizer = tf.keras.optimizers.Adam(1e-4)\n",
+    "def log_normal_pdf(sample, mean, logvar, raxis=1):\n",
+    "  log2pi = tf.math.log(2. * np.pi)\n",
+    "  return tf.reduce_sum(\n",
+    "      -.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),\n",
+    "      axis=raxis)\n",
+    "\n",
+    "\n",
+    "def compute_loss(model, x):\n",
+    "  mean, logvar = model.encode(x[0])\n",
+    "  z = model.reparameterize(mean, logvar)\n",
+    "  x_logit = model.decode(z)\n",
+    "  cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x[1])\n",
+    "  logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])\n",
+    "  logpz = log_normal_pdf(z, 0., 0.)\n",
+    "  logqz_x = log_normal_pdf(z, mean, logvar)\n",
+    "  return -tf.reduce_mean(logpx_z + logpz - logqz_x)\n",
+    "\n",
+    "\n",
+    "@tf.function\n",
+    "def train_step(model, x, optimizer):\n",
+    "  \"\"\"Executes one training step and returns the loss.\n",
+    "\n",
+    "  This function computes the loss and gradients, and uses the latter to\n",
+    "  update the model's parameters.\n",
+    "  \"\"\"\n",
+    "  with tf.GradientTape() as tape:\n",
+    "    loss = compute_loss(model, x)\n",
+    "  gradients = tape.gradient(loss, model.trainable_variables)\n",
+    "  optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
+    "\n",
+    "epochs = 200\n",
+    "num_examples_to_generate = 16\n",
+    "\n",
+    "# keeping the random vector constant for generation (prediction) so\n",
+    "# it will be easier to see the improvement.\n",
+    "random_vector_for_generation = tf.random.normal(\n",
+    "    shape=[num_examples_to_generate, latent_dim])\n",
+    "model = ConvVarAutoencoder(latent_dim, training_dset.shape[1:])\n",
+    "os.makedirs('test', exist_ok=True)\n",
+    "def generate_and_save_images(model, epoch, test_sample):\n",
+    "  mean, logvar = model.encode(test_sample)\n",
+    "  z = model.reparameterize(mean, logvar)\n",
+    "  predictions = model.sample(z)\n",
+    "  fig = plt.figure(figsize=(16, 16))\n",
+    "\n",
+    "  for i in range(0, 8):\n",
+    "    plt.subplot(4, 4, i + 1)\n",
+    "    plt.imshow(np.array(predictions[i, :, :, 0]).T, cmap='viridis', origin='lower', aspect='auto', vmin=0, vmax=1)\n",
+    "    plt.xlabel('time')\n",
+    "    plt.ylabel('freq')\n",
+    "  for i in range(0, 8):\n",
+    "    plt.subplot(4, 4, i + 9)\n",
+    "    plt.imshow(np.array(test_sample[i, :, :, 0]).T, cmap='viridis', origin='lower', aspect='auto', vmin=0, vmax=1)\n",
+    "    plt.xlabel('freq')\n",
+    "    plt.ylabel('time')\n",
+    "  \n",
+    "\n",
+    "  # tight_layout minimizes the overlap between 2 sub-plots\n",
+    "  plt.savefig('test/image_at_epoch_{:04d}.png'.format(epoch))\n",
+    "  plt.show()\n",
+    "\n",
+    "assert batch_size >= num_examples_to_generate\n",
+    "for test_batch in test_dataset.take(1):\n",
+    "  test_sample = test_batch[0][0:num_examples_to_generate, :, :, :]\n",
+    "  "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}