-
Notifications
You must be signed in to change notification settings - Fork 6
/
raw_feats_citation.py
74 lines (62 loc) · 2.17 KB
/
raw_feats_citation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import time
import argparse
import numpy as np
import sklearn as sk
from utils import load_citation
from normalization import fetch_normalization
from transformation import fetch_transformation
from train import train_regression, test_regression
from models import get_model
import torch
from args import get_feat_args
# Citation-network benchmarks this script knows how to load.
datasets = ["cora", "citeseer", "pubmed"]

# Feature-preprocessing options: '' means raw features, 'GFT' applies
# the graph Fourier transform.
preps = ["", "GFT"]

# Supported adjacency/Laplacian normalization schemes ('' = none).
normalization_choices = [
    "",
    "AugNormAdj",
    "LeftNorm",
    "InvLap",
    "CombLap",
    "SymNormAdj",
    "SymNormLap",
]
# Parse CLI arguments and load the citation dataset on CPU.
args = get_feat_args()
adj, features, labels, idx_train,\
    idx_val, idx_test = load_citation(args.dataset,
                                      normalization=args.normalization,
                                      cuda=False)

# Build the classifier; output width is inferred from the label range.
model = get_model(model_opt=args.model,
                  nfeat=features.size(1),
                  nclass=labels.max().item()+1,
                  nhid=args.hidden,
                  dropout=args.dropout,
                  cuda=False)

# Apply the chosen spectral transformation to the raw features, timing
# the forward + inverse round trip (resolves the old "Calculate time
# here" TODO; `time` was imported but never used).
features = features.numpy()
transformer = fetch_transformation(args.preprocess)
transform_start = time.perf_counter()
forward, invert, evals = transformer(adj.to_dense())
features = invert(forward(features,
                          i=args.first_component,
                          k=args.num_component),
                  i=args.first_component,
                  k=args.num_component)
transform_time = time.perf_counter() - transform_start

# FloatTensor already yields float32, so no extra .float() cast is needed.
features = torch.FloatTensor(features)

# Fit the regression head on the transformed features and evaluate.
model, acc_val, train_time = train_regression(model,
                                              features[idx_train],
                                              labels[idx_train],
                                              features[idx_val],
                                              labels[idx_val],
                                              args.epochs,
                                              args.weight_decay,
                                              args.lr,
                                              args.dropout)
acc_test = test_regression(model, features[idx_test], labels[idx_test])

# Report timings (train_time was previously captured but discarded).
print("Transform Time: {:.4f}s Train Time: {:.4f}s".format(transform_time,
                                                           train_time))
print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,\
                                                                 acc_test))