2 changes: 1 addition & 1 deletion course_UvA-DL/01-introduction-to-pytorch/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 1: Introduction to PyTorch"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 description: |
   This tutorial will give a short introduction to PyTorch basics, and get you setup for writing your own neural networks.
@@ -455,7 +455,7 @@

 # Additionally, some operations on a GPU are implemented stochastic for efficiency
 # We want to ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 # %% [markdown]
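Note on the typo fixed in this hunk (and repeated in the files below): assigning to the misspelled attribute `torch.backends.cudnn.determinstic` raises no error, because Python silently creates a new, unused attribute on the `cudnn` module, so determinism was never actually enabled. A minimal sketch of the corrected reproducibility preamble, using the same Lightning helper these notebooks already import:

import torch
import pytorch_lightning as pl

pl.seed_everything(42)  # seeds the Python, NumPy and PyTorch RNGs

# The old `cudnn.determinstic = True` only created an unused attribute;
# the correctly spelled flag below actually forces deterministic cuDNN kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # disable non-deterministic autotuning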
2 changes: 1 addition & 1 deletion course_UvA-DL/02-activation-functions/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 2: Activation Functions"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-08-27
+updated: 2023-01-04
 license: CC BY-SA
 description: |
   In this tutorial, we will take a closer look at (popular) activation functions and investigate their effect on optimization properties in neural networks.
@@ -64,7 +64,7 @@ def set_seed(seed):

 # Additionally, some operations on a GPU are implemented stochastic for efficiency
 # We want to ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 # Fetching the device that will be used throughout this notebook
@@ -1,7 +1,7 @@
 title: "Tutorial 3: Initialization and Optimization"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 tags:
   - Image
@@ -47,7 +47,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 # Fetching the device that will be used throughout this notebook
@@ -937,8 +937,8 @@ def pathological_curve_loss(w1, w2):
 def plot_curve(
     curve_fn, x_range=(-5, 5), y_range=(-5, 5), plot_3d=False, cmap=cm.viridis, title="Pathological curvature"
 ):
-    fig = plt.figure()
-    ax = fig.gca(projection="3d") if plot_3d else fig.gca()
+    _ = plt.figure()
+    ax = plt.axes(projection="3d") if plot_3d else plt.axes()

     x = torch.arange(x_range[0], x_range[1], (x_range[1] - x_range[0]) / 100.0)
     y = torch.arange(y_range[0], y_range[1], (y_range[1] - y_range[0]) / 100.0)
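The change above is a Matplotlib compatibility fix, not a cosmetic one: passing keyword arguments such as `projection="3d"` to `Figure.gca()` was deprecated in Matplotlib 3.4 and rejected in later releases, so `fig.gca(projection="3d")` fails on current versions. A short sketch of the two equivalent replacements on recent Matplotlib:

import matplotlib.pyplot as plt

fig = plt.figure()
ax = plt.axes(projection="3d")  # the form used in the diff above
# or, equivalently, attach the axes to the figure explicitly:
# ax = fig.add_subplot(projection="3d")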
3 changes: 2 additions & 1 deletion course_UvA-DL/04-inception-resnet-densenet/.meta.yaml
@@ -1,7 +1,7 @@
 title: "Tutorial 4: Inception, ResNet and DenseNet"
 author: Phillip Lippe
 created: 2021-08-27
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 tags:
   - Image
@@ -18,5 +18,6 @@ requirements:
   - matplotlib
   - seaborn
   - tabulate
+  - pytorch-lightning>=1.8
 accelerator:
   - GPU
@@ -49,7 +49,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -348,7 +348,8 @@ def train_model(model_name, save_name=None, **kwargs):
     trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, save_name),  # Where to save models
         # We run on a single GPU (if possible)
-        gpus=1 if str(device) == "cuda:0" else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         # How many epochs to train for if no patience is set
         max_epochs=180,
         callbacks=[
@@ -357,7 +358,7 @@ def train_model(model_name, save_name=None, **kwargs):
             ),  # Save the best checkpoint based on the maximum val_acc recorded. Saves only weights and not optimizer
             LearningRateMonitor("epoch"),
         ],  # Log learning rate every epoch
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )  # In case your notebook crashes due to the progress bar, consider disabling it (enable_progress_bar=False)
     trainer.logger._log_graph = True  # If True, we plot the computation graph in tensorboard
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
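The Trainer changes in this file follow the PyTorch Lightning >= 1.8 API that the updated requirements pin: the `gpus=...` argument is replaced by the `accelerator=...`/`devices=...` pair, and the removed `progress_bar_refresh_rate=...` becomes the boolean `enable_progress_bar`. A condensed sketch of the migration pattern applied throughout this PR (argument values are illustrative):

import torch
import pytorch_lightning as pl

trainer = pl.Trainer(
    # old: gpus=1 if str(device) == "cuda:0" else 0
    accelerator="gpu" if torch.cuda.is_available() else "cpu",
    devices=1,
    # old: progress_bar_refresh_rate=1
    enable_progress_bar=True,
    max_epochs=180,
)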
3 changes: 2 additions & 1 deletion course_UvA-DL/05-transformers-and-MH-attention/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 5: Transformers and Multi-Head Attention"
 author: Phillip Lippe
 created: 2021-06-30
-updated: 2021-11-29
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -19,5 +19,6 @@ requirements:
   - torchvision
   - matplotlib
   - seaborn
+  - pytorch-lightning>=1.8
 accelerator:
   - GPU
@@ -61,7 +61,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -979,10 +979,11 @@ def train_reverse(**kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=10,
         gradient_clip_val=5,
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need

@@ -1439,10 +1440,11 @@ def train_anomaly(**kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=100,
         gradient_clip_val=2,
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need

7 changes: 4 additions & 3 deletions course_UvA-DL/06-graph-neural-networks/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 6: Basics of Graph Neural Networks"
 author: Phillip Lippe
 created: 2021-06-07
-updated: 2021-12-04
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -19,10 +19,11 @@ description: |
   The full list of tutorials can be found at https://uvadlc-notebooks.rtfd.io.
 requirements:
   - torch-scatter
-  - torch-sparse<0.6.13
+  - torch-sparse
   - torch-cluster
   - torch-spline-conv
-  - torch-geometric==2.0.2
+  - torch-geometric
+  - pytorch-lightning>=1.8
 pip__find-link:
   # - https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
   - https://pytorch-geometric.com/whl/torch-%(TORCH_MAJOR_DOT_MINOR)s.0+%(DEVICE)s.html
14 changes: 8 additions & 6 deletions course_UvA-DL/06-graph-neural-networks/GNN_overview.py
@@ -39,7 +39,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 # %% [markdown]
@@ -634,7 +634,7 @@ def test_step(self, batch, batch_idx):
 # Additionally to the Lightning module, we define a training function below.
 # As we have a single graph, we use a batch size of 1 for the data loader and share the same data loader for the train,
 # validation, and test set (the mask is picked inside the Lightning module).
-# Besides, we set the argument `progress_bar_refresh_rate` to zero as it usually shows the progress per epoch,
+# Besides, we set the argument `enable_progress_bar` to False as it usually shows the progress per epoch,
 # but an epoch only consists of a single step.
 # If you have downloaded the pre-trained models in the beginning of the tutorial, we load those instead of training from scratch.
 # Finally, we test the model and return the results.
@@ -651,9 +651,10 @@ def train_node_classifier(model_name, dataset, **model_kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=AVAIL_GPUS,
+        accelerator="gpu" if AVAIL_GPUS > 0 else "cpu",
+        devices=max(1, AVAIL_GPUS),
         max_epochs=200,
-        progress_bar_refresh_rate=0,
+        enable_progress_bar=False,
     )  # False because epoch size is 1
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need

@@ -932,9 +933,10 @@ def train_graph_classifier(model_name, **model_kwargs):
     trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
-        gpus=AVAIL_GPUS,
+        accelerator="gpu" if AVAIL_GPUS > 0 else "cpu",
+        devices=max(1, AVAIL_GPUS),
         max_epochs=500,
-        progress_bar_refresh_rate=0,
+        enable_progress_bar=False,
     )
     trainer.logger._default_hp_metric = None

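One subtlety in the two trainers above: the old `gpus=AVAIL_GPUS` accepted 0 to mean "train on CPU", but `devices` must be at least 1 regardless of the accelerator, hence the `max(1, AVAIL_GPUS)` clamp. A small sketch, assuming `AVAIL_GPUS` is derived from `torch.cuda.device_count()` as elsewhere in this notebook:

import torch

AVAIL_GPUS = min(1, torch.cuda.device_count())  # 0 on CPU-only machines

accelerator = "gpu" if AVAIL_GPUS > 0 else "cpu"
devices = max(1, AVAIL_GPUS)  # `devices=0` is invalid, even on CPU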
@@ -1,7 +1,7 @@
 title: "Tutorial 7: Deep Energy-Based Generative Models"
 author: Phillip Lippe
 created: 2021-07-12
-updated: 2021-07-12
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -22,6 +22,7 @@ requirements:
   - torchvision
   - matplotlib
   - tensorboard
+  - pytorch-lightning>=1.8
 accelerator:
   - CPU
   - GPU
@@ -44,7 +44,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -640,7 +640,8 @@ def train_model(**kwargs):
     # Create a PyTorch Lightning trainer with the generation callback
     trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, "MNIST"),
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=60,
         gradient_clip_val=0.1,
         callbacks=[
@@ -650,7 +651,7 @@ def train_model(**kwargs):
             OutlierCallback(),
             LearningRateMonitor("epoch"),
         ],
-        progress_bar_refresh_rate=1,
+        enable_progress_bar=True,
     )
     # Check whether pretrained model exists. If yes, load it and skip training
     pretrained_filename = os.path.join(CHECKPOINT_PATH, "MNIST.ckpt")
3 changes: 2 additions & 1 deletion course_UvA-DL/08-deep-autoencoders/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 8: Deep Autoencoders"
 author: Phillip Lippe
 created: 2021-07-12
-updated: 2021-07-12
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -22,6 +22,7 @@ requirements:
   - torchvision
   - matplotlib
   - seaborn
+  - pytorch-lightning>=1.8
 accelerator:
   - CPU
   - GPU
5 changes: 3 additions & 2 deletions course_UvA-DL/08-deep-autoencoders/Deep_Autoencoders.py
@@ -41,7 +41,7 @@
 pl.seed_everything(42)

 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
-torch.backends.cudnn.determinstic = True
+torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False

 device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
@@ -385,7 +385,8 @@ def train_cifar(latent_dim):
     # Create a PyTorch Lightning trainer with the generation callback
     trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
-        gpus=1 if str(device).startswith("cuda") else 0,
+        accelerator="gpu" if str(device).startswith("cuda") else "cpu",
+        devices=1,
         max_epochs=500,
         callbacks=[
             ModelCheckpoint(save_weights_only=True),
3 changes: 2 additions & 1 deletion course_UvA-DL/09-normalizing-flows/.meta.yml
@@ -1,7 +1,7 @@
 title: "Tutorial 9: Normalizing Flows for Image Modeling"
 author: Phillip Lippe
 created: 2021-06-07
-updated: 2021-06-16
+updated: 2023-01-04
 license: CC BY-SA
 build: 0
 tags:
@@ -25,6 +25,7 @@ requirements:
   - matplotlib
   - seaborn
   - tabulate
+  - pytorch-lightning>=1.8
 accelerator:
   - CPU
   - GPU