-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
58 lines (47 loc) · 1.61 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import sys
sys.path.append("./scripts")
import os
import random
import torch
import torch.backends.cudnn as cudnn
import warnings
import numpy as np
from train import train
from config import get_args
from dataloader import Manufacturing_dataset
from utils import DataLoaderX, Repeat, setup_seed
from constant import SETTINGS
warnings.filterwarnings('ignore')
def main(args):
    """Build the manufacturing dataset and launch self-supervised training.

    Mutates ``args`` in place, attaching ``device``, ``data_path`` and
    ``num_classes`` before handing everything to ``train``.

    Args:
        args: Namespace produced by ``config.get_args``. Must provide
            ``settings``, ``dataset``, ``input_size``, ``workers``,
            ``batch_size`` and ``gpu_index``.

    Raises:
        KeyError: If ``args.settings`` is not a key of ``SETTINGS``.
    """
    # Honor the CLI-selected GPU; the original hard-coded 'cuda:0' even
    # though the training context below uses args.gpu_index.
    args.device = torch.device('cuda:{}'.format(args.gpu_index))

    setting = SETTINGS.get(args.settings)
    if setting is None:
        # Fail fast with a clear message instead of an AttributeError later.
        raise KeyError("Unknown settings key: {!r}".format(args.settings))

    # Dataset roots are resolved relative to the repo layout.
    if args.dataset == "MvTecAD":
        args.data_path = '../MvTecAD'
    elif args.dataset == "Industrial_dataset":
        args.data_path = '../Manufacturing_Dataset'

    train_dataset = Manufacturing_dataset(
        dataset_name=args.dataset,
        dataset_path=args.data_path,
        input_size=args.input_size,
        is_labels=False,
        is_train=True,
        load_memory=False,
    )
    train_dataset.configure_self_sup(self_sup_args=setting.get('self_sup_args'))

    # Query the class count once; __get_num_classes__ is a project-defined
    # accessor on Manufacturing_dataset, so its name must stay as-is.
    args.num_classes = train_dataset.__get_num_classes__()
    print("Dataset Num Classes: {}".format(args.num_classes))
    print("Dataset Num Samples: {}".format(len(train_dataset)))

    train_loader = DataLoaderX(
        # Repeat(dataset, 3) triples the epoch length over the same samples.
        Repeat(train_dataset, 3),
        pin_memory=True,
        num_workers=args.workers,
        batch_size=args.batch_size,
        drop_last=True,
        shuffle=False,
    )

    with torch.cuda.device(args.gpu_index):
        train(train_dataset, train_loader, args)
if __name__ == "__main__":
args = get_args()
main(args)