Merge pull request #19 from RolnickLab/new-gnn
vict0rsch authored Sep 29, 2022
2 parents 4ca9b47 + fc94da7 commit d70a4c9
Showing 52 changed files with 4,536 additions and 248 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -5,6 +5,7 @@ results
logs
*.traj
experimental
+*.pkl

# Byte-compiled / optimized / DLL files
__pycache__/
2 changes: 2 additions & 0 deletions configs/is2re/100k/dimenet_plus_plus/new_dpp.yml
@@ -21,6 +21,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 1.
3 changes: 3 additions & 0 deletions configs/is2re/100k/forcenet/new_forcenet.yml
@@ -19,9 +19,12 @@ model:
# drlab attributes:
tag_hidden_channels: 0 # 64
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
+phys_hidden_channels: 0
phys_embeds: False # True
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 8.
2 changes: 2 additions & 0 deletions configs/is2re/100k/schnet/new_schnet.yml
@@ -17,6 +17,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: weighted-av-initial-embeds # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, diff_pooling}

# *** Important note ***
# The total number of gpus used for this run was 1.
42 changes: 42 additions & 0 deletions configs/is2re/100k/sfarinet/sfarinet.yml
@@ -0,0 +1,42 @@
includes:
- configs/is2re/100k/base.yml

model:
name: sfarinet
hidden_channels: 256
num_filters: 128
num_interactions: 3
num_gaussians: 100
cutoff: 6.0
use_pbc: True
regress_forces: False
# drlab attributes:
tag_hidden_channels: 0 # 32
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
energy_head: False # can be {False, weighted-av-initial-embeds,
# weighted-av-final-embeds, pooling, graclus, random}
# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
# the lr_milestones and warmup_steps need to be adjusted accordingly.

optim:
batch_size: 64
eval_batch_size: 64
num_workers: 4
lr_initial: 0.005
lr_gamma: 0.1
lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
- 1562
- 2343
- 3125
warmup_steps: 468
warmup_factor: 0.2
max_epochs: 20

frame_averaging: False # 2D, 3D, False
choice_fa: False # can be {None, full, random, det, e3-full, e3-random, e3-det}
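Editor's aside: the "Important note" block that recurs in these new configs describes a concrete adjustment — the step-based values (`lr_milestones`, `warmup_steps`) assume a global batch size of num_gpus * batch_size = 64. As a hedged illustration of the adjustment it asks for (this helper is not part of the repo), the usual fix is to scale the step counts inversely with the new global batch size, so decay and warmup happen after the same number of training samples:

```python
# Illustrative only -- not a function from this repository.
def rescale_schedule(milestones, warmup_steps, old_global_bs, new_global_bs):
    """Scale step counts inversely with the global batch size."""
    ratio = old_global_bs / new_global_bs
    return [round(m * ratio) for m in milestones], round(warmup_steps * ratio)

# Example: moving this config from 1 GPU x batch 64 to 4 GPUs x batch 64.
milestones, warmup = rescale_schedule([1562, 2343, 3125], 468, 64, 256)
print(milestones, warmup)  # [390, 586, 781] 117
```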
2 changes: 2 additions & 0 deletions configs/is2re/10k/dimenet_plus_plus/new_dpp.yml
@@ -21,6 +21,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 1.
50 changes: 50 additions & 0 deletions configs/is2re/10k/fanet/fanet.yml
@@ -0,0 +1,50 @@
includes:
- configs/is2re/10k/base.yml

model:
name: fanet
hidden_channels: 256
num_filters: 128
num_interactions: 3
num_gaussians: 100
cutoff: 6.0
use_pbc: True
regress_forces: False
# drlab attributes:
tag_hidden_channels: 0 # 32
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
energy_head: False # can be {False, weighted-av-initial-embeds,
# weighted-av-final-embeds, pooling, graclus, random}

# new features
skip_co: False # output skip connections
normalized_rel_pos: False # normalize r_ij + squash in [0,1]
second_layer_MLP: False # in EmbeddingBlock
mlp_rij: 0 # apply mlp to r_ij
complex_mp: False # concat (e_ij || h_i || h_j) for W in MP

# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
# the lr_milestones and warmup_steps need to be adjusted accordingly.

optim:
batch_size: 64
eval_batch_size: 64
num_workers: 4
lr_initial: 0.005
lr_gamma: 0.1
lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
- 1562
- 2343
- 3125
warmup_steps: 468
warmup_factor: 0.2
max_epochs: 20

frame_averaging: False # 2D, 3D, da, False
choice_fa: False # can be {None, full, random, det, e3, e3-random, e3-det}
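For readers unfamiliar with these `optim` fields, here is a minimal sketch of the schedule they conventionally describe in OCP-style trainers (the actual scheduler lives in the trainer code, not in this diff): a linear warmup from warmup_factor * lr_initial up to lr_initial over warmup_steps, then a multiplication by lr_gamma at each milestone.

```python
import bisect

# Sketch under stated assumptions -- not the repository's scheduler.
def lr_at_step(step, lr_initial=0.005, lr_gamma=0.1,
               milestones=(1562, 2343, 3125),
               warmup_steps=468, warmup_factor=0.2):
    if step < warmup_steps:
        alpha = step / warmup_steps  # linear ramp of the lr multiplier
        return lr_initial * (warmup_factor * (1 - alpha) + alpha)
    # one factor of lr_gamma per milestone already passed
    return lr_initial * lr_gamma ** bisect.bisect_right(milestones, step)

print(lr_at_step(0))     # 0.001  (warmup_factor * lr_initial)
print(lr_at_step(1000))  # 0.005  (warmup done, before first milestone)
print(lr_at_step(2000))  # ~0.0005 (after the first milestone)
```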
9 changes: 6 additions & 3 deletions configs/is2re/10k/forcenet/new_forcenet.yml
@@ -17,11 +17,14 @@ model:
max_n: 3
use_pbc: True
# drlab attributes:
-tag_hidden_channels: 64 # 64
-pg_hidden_channels: 32 # 32 -> period & group embedding hidden channels
-phys_embeds: True # True
+tag_hidden_channels: 0 # 64
+pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
+phys_embeds: False # True
+phys_hidden_channels: 0
 graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
 # one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 8.
3 changes: 2 additions & 1 deletion configs/is2re/10k/schnet/new_schnet.yml
@@ -17,7 +17,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
-
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}
# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
42 changes: 42 additions & 0 deletions configs/is2re/10k/sfarinet/sfarinet.yml
@@ -0,0 +1,42 @@
includes:
- configs/is2re/10k/base.yml

model:
name: sfarinet
hidden_channels: 256
num_filters: 128
num_interactions: 3
num_gaussians: 100
cutoff: 6.0
use_pbc: True
regress_forces: False
# drlab attributes:
tag_hidden_channels: 0 # 32
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
energy_head: False # can be {False, weighted-av-initial-embeds,
# weighted-av-final-embeds, pooling, graclus, random}
# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
# the lr_milestones and warmup_steps need to be adjusted accordingly.

optim:
batch_size: 64
eval_batch_size: 64
num_workers: 4
lr_initial: 0.005
lr_gamma: 0.1
lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma
- 1562
- 2343
- 3125
warmup_steps: 468
warmup_factor: 0.2
max_epochs: 20

frame_averaging: False # 2D, 3D, da, False
choice_fa: False # can be {None, full, random, det, e3-full, e3-random, e3-det}
2 changes: 2 additions & 0 deletions configs/is2re/all/dimenet_plus_plus/new_dpp.yml
@@ -21,6 +21,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 4.
49 changes: 49 additions & 0 deletions configs/is2re/all/fanet/fanet.yml
@@ -0,0 +1,49 @@
includes:
- configs/is2re/all/base.yml

model:
name: fanet
hidden_channels: 384
num_filters: 128
num_interactions: 4
num_gaussians: 100
cutoff: 6.0
use_pbc: True
regress_forces: False
# drlab attributes:
tag_hidden_channels: 0 # 32
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
energy_head: False # can be {False, weighted-av-initial-embeds,
# weighted-av-final-embeds, pooling, graclus, random}
# Ablation
skip_co: False # output skip connections
normalized_rel_pos: False # normalize r_ij + squash in [0,1]
second_layer_MLP: False # in EmbeddingBlock
mlp_rij: 0 # apply mlp to r_ij
complex_mp: false # concat (e_ij || h_i || h_j) for W in MP

# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
# the lr_milestones and warmup_steps need to be adjusted accordingly.

optim:
batch_size: 64
eval_batch_size: 64
num_workers: 4
lr_initial: 0.001
lr_gamma: 0.1
lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma
- 16000
- 25000
- 35000
warmup_steps: 5394
warmup_factor: 0.2
max_epochs: 17

frame_averaging: False # False, 2D, 3D, da
choice_fa: False # can be {None, full, random, det, e3-full, e3-random, e3-det}
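Each of these new configs opens with an `includes:` list. As a hedged sketch of that mechanism (function names here are illustrative; the repo's actual loader may differ), the base file is resolved first and the model-specific keys are deep-merged on top:

```python
import yaml

def deep_merge(base: dict, override: dict) -> dict:
    """Recursively overlay `override` onto `base`."""
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = deep_merge(out[key], value)
        else:
            out[key] = value
    return out

def load_config(path: str) -> dict:
    with open(path) as f:
        cfg = yaml.safe_load(f)
    merged = {}
    for include in cfg.pop("includes", []):
        merged = deep_merge(merged, load_config(include))
    return deep_merge(merged, cfg)

# e.g. load_config("configs/is2re/all/fanet/fanet.yml") resolves base.yml
# first, then applies the model/optim blocks shown above.
```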
12 changes: 8 additions & 4 deletions configs/is2re/all/forcenet/new_forcenet.yml
@@ -1,5 +1,5 @@
includes:
-- configs/is2re/all/base.yml
+  - configs/is2re/all/base.yml

model:
name: new_forcenet
@@ -20,8 +20,12 @@ model:
tag_hidden_channels: 0 # 64
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
-graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
-# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+phys_hidden_channels: 0
+graph_rewiring:
+  False # {false, remove-tag-0, one-supernode-per-graph,
+  # one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}

# *** Important note ***
# The total number of gpus used for this run was 8.
@@ -33,7 +37,7 @@ optim:
eval_batch_size: 8
num_workers: 8
lr_initial: 0.0005
-max_epochs: 20
+max_epochs: 8 # 20
energy_coefficient: 0
lr_gamma: 0.1
lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma
3 changes: 2 additions & 1 deletion configs/is2re/all/schnet/new_schnet.yml
@@ -17,7 +17,8 @@ model:
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
-
+energy_head: False # can be {False, weighted-av-initial-embeds,
+# weighted-av-final-embeds, pooling, graclus, random}
# *** Important note ***
# The total number of gpus used for this run was 4.
# If the global batch size (num_gpus * batch_size) is modified
42 changes: 42 additions & 0 deletions configs/is2re/all/sfarinet/sfarinet.yml
@@ -0,0 +1,42 @@
includes:
- configs/is2re/all/base.yml

model:
name: sfarinet
hidden_channels: 384
num_filters: 128
num_interactions: 4
num_gaussians: 100
cutoff: 6.0
use_pbc: True
regress_forces: False
# drlab attributes:
tag_hidden_channels: 0 # 32
pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels
phys_embeds: False # True
phys_hidden_channels: 0
graph_rewiring: False # can be in {false, remove-tag-0, one-supernode-per-graph,
# one-supernode-per-atom-type, one-supernode-per-atom-type-dist}
energy_head: False # can be {False, weighted-av-initial-embeds,
# weighted-av-final-embeds, pooling, graclus, random}
# *** Important note ***
# The total number of gpus used for this run was 1.
# If the global batch size (num_gpus * batch_size) is modified
# the lr_milestones and warmup_steps need to be adjusted accordingly.

optim:
batch_size: 64
eval_batch_size: 64
num_workers: 4
lr_initial: 0.001
lr_gamma: 0.1
lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma
- 17981
- 26972
- 35963
warmup_steps: 5394
warmup_factor: 0.2
max_epochs: 17

frame_averaging: False # 2D, 3D, da, False
choice_fa: False # can be {None, full, random, det, e3-full, e3-random, e3-det}
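The `frame_averaging` and `choice_fa` flags closing each new config presumably refer to frame averaging in the sense of Puny et al. (ICLR 2022): build canonical frames from a PCA of the atom positions and average predictions over them to obtain rotation equivariance. A rough sketch of the frame construction, with illustrative names only (not the repo's implementation):

```python
import itertools
import torch

def build_frames(pos: torch.Tensor):
    """pos: (n_atoms, 3) positions. Returns candidate rotation frames."""
    centered = pos - pos.mean(dim=0)
    _, eigvec = torch.linalg.eigh(centered.T @ centered)  # principal axes
    # Eigenvectors are defined only up to sign: enumerate the sign choices.
    return [eigvec * torch.tensor(signs)
            for signs in itertools.product([1.0, -1.0], repeat=3)]

# A "full" choice_fa would average model outputs over all frames;
# "random" samples one per forward pass; "det" picks one deterministically.
# The e3-* variants plausibly restrict frames to proper rotations (det=+1).
```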
4 changes: 4 additions & 0 deletions configs/sbatch/alex.hernandez-garcia.yaml
@@ -0,0 +1,4 @@
# Overwrites defaults.yaml for user `schmidtv`.
# Create your own $USER.yaml in order to overwrite defaults.yaml systematically to your own taste.
virtualenv: True
env: /home/mila/a/alex.hernandez-garcia/.virtualenvs/ocp-torch1110cuda102
3 changes: 3 additions & 0 deletions configs/sbatch/alexandre.duval.yaml
@@ -0,0 +1,3 @@
mem: 32GB
env: ocp
partition: long
4 changes: 4 additions & 0 deletions configs/sbatch/defaults.yaml
@@ -32,4 +32,8 @@ py_args: "" # arguments for main.py
note: "" # wandb run note
git_checkout: null # if null, no checkout. Use as `git_checkout=some-branch` or `git_checkout=somecommithash`

+sweep: false
+count: 0
+array: 0

+dev: false
7 changes: 7 additions & 0 deletions configs/sweep/defaults.yaml
@@ -0,0 +1,7 @@
# default minydra args for sweep.py

method: random
params: sweep_wandb_all.yml
count: 1
name: null
mode: run_jobs # "run_jobs" or "print_commands"
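These are defaults for sweep.py's minydra arguments. A hedged usage sketch follows (the real sweep.py is not shown in this diff; only minydra's basic key=value command-line parsing is assumed):

```python
import yaml
from minydra import resolved_args

# e.g. invoked as: python sweep.py count=5 mode=print_commands
defaults = yaml.safe_load(open("configs/sweep/defaults.yaml"))
overrides = resolved_args()       # minydra parses key=value CLI args
conf = {**defaults, **overrides}  # CLI overrides the defaults above

if conf["mode"] == "print_commands":
    print(f"would launch {conf['count']} runs "
          f"({conf['method']} search over {conf['params']})")
```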