Skip to content

Commit

Permalink
Add ProGNN. TODO sparse version
Browse files Browse the repository at this point in the history
  • Loading branch information
Jeratt committed Dec 26, 2024
1 parent 9ac0316 commit e038a1a
Show file tree
Hide file tree
Showing 3 changed files with 227 additions and 105 deletions.
131 changes: 129 additions & 2 deletions experiments/attack_defense_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
from attacks.QAttack import qattack
from defense.JaccardDefense import jaccard_def
from attacks.metattack import meta_gradient_attack
from attacks.CLGA import CLGA_gpt
from defense.GNNGuard import gnnguard


Expand Down Expand Up @@ -1131,6 +1130,133 @@ def test_pgd():
print(f"Before PGD attack on graph (MUTAG dataset): {info_before_pgd_attack_on_graph}")
print(f"After PGD attack on graph (MUTAG dataset): {info_after_pgd_attack_on_graph}")

def test_pro():
    """Smoke-test the ProGNN poison defense on Planetoid/Cora.

    Loads the Cora dataset, builds a two-layer GCN ('gcn_gcn' from the model
    zoo), attaches the ProGNN defender to the model manager, trains from
    scratch for ``steps_epochs`` steps, and prints macro-F1 / accuracy on the
    test mask.  No return value; results are printed.
    """
    from defense.ProGNN.prognn import ProGNNDefender

    # ProGNN optimizes a dense adjacency matrix, which is memory-heavy on
    # GPU — keep this experiment on CPU (CUDA path kept for reference).
    # my_device = device('cuda' if is_available() else 'cpu')
    my_device = device('cpu')

    full_name = ("single-graph", "Planetoid", 'Cora')

    dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
        full_name=full_name,
        dataset_ver_ind=0
    )

    gnn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')

    # Plain Adam with library defaults; no feature masking.
    manager_config = ConfigPattern(
        _config_class="ModelManagerConfig",
        _config_kwargs={
            "mask_features": [],
            "optimizer": {
                "_class_name": "Adam",
                "_config_kwargs": {},
            }
        }
    )

    steps_epochs = 200
    gnn_model_manager = FrameworkGNNModelManager(
        gnn=gnn,
        dataset_path=results_dataset_path,
        manager_config=manager_config,
        modification=ModelModificationConfig(model_ver_ind=0, epochs=steps_epochs)
    )

    # Flip to True to persist the trained model executor.
    save_model_flag = False

    gnn_model_manager.gnn.to(my_device)
    data = data.to(my_device)

    # NOTE(review): the two configs below are currently unused — they are
    # ready-made alternatives for the commented-out evasion-defender toggle
    # further down.  TODO: drop them once the experiment matrix is settled.
    fgsm_evasion_attack_config1 = ConfigPattern(
        _class_name="FGSM",
        _import_path=EVASION_ATTACK_PARAMETERS_PATH,
        _config_class="EvasionAttackConfig",
        _config_kwargs={
            "epsilon": 0.01,
        }
    )
    at_evasion_defense_config = ConfigPattern(
        _class_name="AdvTraining",
        _import_path=EVASION_DEFENSE_PARAMETERS_PATH,
        _config_class="EvasionDefenseConfig",
        _config_kwargs={
            "attack_name": None,
            "attack_config": fgsm_evasion_attack_config1
        }
    )

    gradientregularization_evasion_defense_config = ConfigPattern(
        _class_name="GradientRegularizationDefender",
        _import_path=EVASION_DEFENSE_PARAMETERS_PATH,
        _config_class="EvasionDefenseConfig",
        _config_kwargs={
            "regularization_strength": 0.1 * 500
        }
    )

    # The defense under test: ProGNN with a short epoch budget for a fast run.
    poison_defense_config = ConfigPattern(
        _class_name="ProGNNDefender",
        _import_path=POISON_DEFENSE_PARAMETERS_PATH,
        _config_class="PoisonDefenseConfig",
        _config_kwargs={
            "epochs": 10
        }
    )

    # gnn_model_manager.set_poison_attacker(poison_attack_config=poison_attack_config)
    gnn_model_manager.set_poison_defender(poison_defense_config=poison_defense_config)
    # gnn_model_manager.set_evasion_defender(evasion_defense_config=gradientregularization_evasion_defense_config)

    warnings.warn("Start training")
    dataset.train_test_split()

    try:
        # Deliberately force the retrain path; loading a cached executor is
        # disabled for this experiment.
        raise FileNotFoundError()
        # gnn_model_manager.load_model_executor()
    except FileNotFoundError:
        # Reset epoch counters so training starts from epoch 0.
        gnn_model_manager.epochs = gnn_model_manager.modification.epochs = 0
        train_test_split_path = gnn_model_manager.train_model(gen_dataset=dataset, steps=steps_epochs,
                                                              save_model_flag=save_model_flag,
                                                              metrics=[Metric("F1", mask='train', average=None),
                                                                       Metric("Accuracy", mask="train")])

        # Persist and re-apply the split produced during training so that
        # evaluation below uses the same masks.
        if train_test_split_path is not None:
            dataset.save_train_test_mask(train_test_split_path)
            train_mask, val_mask, test_mask, train_test_sizes = torch.load(train_test_split_path / 'train_test_split')[
                                                                :]
            dataset.train_mask, dataset.val_mask, dataset.test_mask = train_mask, val_mask, test_mask
            data.percent_train_class, data.percent_test_class = train_test_sizes

    warnings.warn("Training was successful")

    metric_loc = gnn_model_manager.evaluate_model(
        gen_dataset=dataset, metrics=[Metric("F1", mask='test', average='macro'),
                                      Metric("Accuracy", mask='test')])
    print("TEST", metric_loc)


def exp_pipeline():
dataset_grid = ['Cora', 'Photo']
Expand All @@ -1154,8 +1280,9 @@ def exp_pipeline():

# random.seed(10)
# test_attack_defense()
exp_pipeline()
# exp_pipeline()
# torch.manual_seed(5000)
# test_gnnguard()
# test_jaccard()
# test_pgd()
test_pro()
11 changes: 10 additions & 1 deletion metainfo/poison_defense_parameters.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,15 @@
"attention": ["attention", "bool", true, {}, "?"],
"drop": ["drop", "bool", true, {}, "?"],
"train_iters": ["train_iters", "int", 50, {}, "?"]
}
},
"ProGNNDefender": {
"symmetric": ["symmetric", "bool", true, {}, "?"],
"lr_adj": ["lr_adj", "float", 0.01, {"min": 0.0001, "step": 0.005}, "?"],
"alpha": ["alpha", "float", 5e-4, {"min": 0.0, "step": 0.0001}, "?"],
"beta": ["beta", "float", 1.5, {"min": 0.0, "step": 0.01}, "?"],
"epochs": ["epochs", "int", 400, {"min": 1, "step": 1}, "?"],
"lambda_": ["lambda_", "float", 0, {"min": 0.0, "step": 0.001}, "?"],
"phi": ["phi", "float", 0, {"min": 0.0, "step": 0.01}, "?"]
}
}

Loading

0 comments on commit e038a1a

Please sign in to comment.