Commit 98dbc187 authored by Martin Řepa

Prepare experiments for game-theoretic latency model

parent 8285b235
......@@ -109,7 +109,7 @@ class GradientAttacker(Attacker):
i = torch.argmin(losses)
best_action = all_actions[i]
self.value_of_last_brp = -losses[i].item()
return [best_action[0].item(), best_action[1].item()]
return [best_action[i].item() for i in range(self.features_count)]
def _does_br_exists(self, played_actions_p1, br_p1, value):
return self.value_of_last_brp - value < self.conf.epsion
......@@ -110,17 +110,28 @@ class Synthesizer:
if __name__ == "__main__":
synt = Synthesizer(2)
synt = Synthesizer(3)
# synt.add_cluster_around_2Dfunc(lambda x: 0.8/(15*(x+.05)), 0.25, 15000)
# Generate normal distribution data
arr = np.random.multivariate_normal([0, 0], [[2, 1], [1, 1]], 10000)
# Generate 3D normal distribution data
arr = np.random.multivariate_normal([0, 0, 0], [[2, 1, 1], [1, 5, 1],
[1, 1, 2]], 10000)
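# Shift each feature to be non-negative, then divide by 1.2 * max so values stay within [0, ~0.83]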
arr = arr - np.min(arr, axis=0)
m = np.max(arr, axis=0)
arr = arr / (m + (m*0.2))
synt.add_raw_data(arr)
# synt = Synthesizer(2)
# Generate 2D normal distribution data
# arr = np.random.multivariate_normal([0, 0], [[2, 1], [1, 1]], 10000)
# arr = arr - np.min(arr, axis=0)
# m = np.max(arr, axis=0)
# arr = arr / (m + (m*0.2))
# synt.add_raw_data(arr)
# synt.add_cluster_around_2Dfunc(lambda x: 0.8/(15*(x+.05)), 0.25, 15000)
synt.generate()
synt.plot2D()
# synt.save_to_file(Path('scored/normal_distribution_experiments.csv'))
# synt.plot2D()
synt.save_to_file(Path('scored/normal_distribution_3D_experiments.csv'))
use_blocking_model: False
legacy_folder: /home/ignac/experiments/game_theory_model_latency/legacy1 # Change according to the configuration
num_of_experiments: 5
nn_epochs: 10
i_a: 1
i_d: 2
benign_ratio: 10000
features_num: 2
data_file: normal_distribution_experiments.csv
experiments_per_setup: 5 # 6
legacy_folder: /home/ignac/experiments/game_theory_model_latency/2dimension_discrete_gradient_5times # Change according to the configuration
attacker_mode: discrete # options are 'both', 'gradient', 'discrete'
conf:
  attacker_epochs: 3000
  defender_epochs: 4000
  i_a: 1
  i_d: 3
  benign_ratio: 5
  data_file: normal_distribution_experiments.csv
  features: 2
......@@ -2,13 +2,13 @@ import os
import pickle
import sys
import time
from dataclasses import dataclass
from os.path import dirname
from pathlib import Path
from typing import List
import torch
import yaml
from dataclasses import dataclass
from src.config import RootConfig
from src.game import Game
......@@ -32,6 +32,7 @@ class DefenderAction:
@dataclass
class SubResult:
iterations: int
legacy_folder: str
time: float
zero_sum_game_value: float
......@@ -41,9 +42,15 @@ class SubResult:
defender_actions: List[DefenderAction]
@dataclass
class Setup:
name: str
results: List[SubResult]
@dataclass
class Experiment:
sub_results: List[SubResult]
setups: List[Setup]
def get_configuration() -> dict:
......@@ -53,33 +60,23 @@ def get_configuration() -> dict:
return yaml.load(content, Loader=yaml.FullLoader)
def get_root_conf(conf_of_conf: dict) -> RootConfig:
conf = RootConfig()
conf.debug = False
conf.plot_conf.plot_enabled = False
conf.model_conf.use_blocking_model = bool(conf_of_conf['use_blocking_model'])
conf.model_conf.set_ia_id_benign_ration(conf_of_conf['i_a'],
conf_of_conf['i_d'],
conf_of_conf['benign_ratio'])
conf.model_conf.set_data_file(conf_of_conf['data_file'])
conf.model_conf.defender_conf.nn_conf.epochs = conf_of_conf['nn_epochs']
return conf
def main(experiment_conf, base_dir):
conf = get_root_conf(experiment_conf)
repetitions = experiment_conf['num_of_experiments']
def exec_new_setup(conf: RootConfig, folder: str, iterations: int, name: str) -> Setup:
sub_results = []
for i in range(repetitions):
print(f'Starting {i+1}. iteration.')
os.mkdir(f'{base_dir}/{i}')
i = 0
while i < iterations:
if not os.path.exists(f'{folder}/{i}'):
os.mkdir(f'{folder}/{i}')
print(f'Starting iteration {i+1} of the {name} attacker setup')
start = time.time()
result = Game(conf).solve_game()
time_taken = time.time() - start
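# Skip runs that converge after a single iteration and retry this index without advancing the counter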
if result.iterations == 1:
continue
attacker_actions = []
for p1_action, p1_prob in zip(result.ordered_actions_p1,
result.probs_p1):
......@@ -95,9 +92,10 @@ def main(experiment_conf, base_dir):
p2_action.final_loss,
p2_action.final_fp_cost))
torch.save(p2_action.model.state_dict(),
f'{base_dir}/{i}/{p2_action.id}.pt')
f'{folder}/{i}/{p2_action.id}.pt')
sub_result = SubResult(str(i),
sub_result = SubResult(result.iterations,
str(i),
time_taken,
result.zero_sum_nash_val,
result.attacker_value,
......@@ -105,8 +103,63 @@ def main(experiment_conf, base_dir):
attacker_actions,
defender_actions)
sub_results.append(sub_result)
i += 1
done_experiment = Experiment(sub_results)
final_setup = Setup(name, sub_results)
# ------------ Save backup data from this setup ---------
data_file = f'{folder}/backup_setup_data'
with open(data_file, 'wb') as file:
pickle.dump(final_setup, file)
# -------------------------------------------------------
return final_setup
def get_root_conf(conf_of_conf: dict) -> RootConfig:
conf = RootConfig()
conf.debug = False
conf.plot_conf.plot_enabled = False
conf.model_conf.attacker_conf.epochs = conf_of_conf['attacker_epochs']
conf.model_conf.defender_conf.nn_conf.epochs = conf_of_conf['defender_epochs']
conf.model_conf.features_count = conf_of_conf['features']
conf.model_conf.use_blocking_model = False
conf.model_conf.set_ia_id_benign_ration(conf_of_conf['i_a'],
conf_of_conf['i_d'],
conf_of_conf['benign_ratio'])
conf.model_conf.set_data_file(conf_of_conf['data_file'])
return conf
def main(experiment_conf, base_dir):
conf = get_root_conf(experiment_conf['conf'])
attacker_mode = experiment_conf['attacker_mode']
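# Translate the configured attacker_mode into the (use_gradient_descent, setup_name) pairs to run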
modes = None
if attacker_mode == 'both':
modes = [(True, 'gradient'), (False, 'discrete')]
elif attacker_mode == 'gradient':
modes = [(True, 'gradient')]
elif attacker_mode == 'discrete':
modes = [(False, 'discrete')]
else:
print('Invalid attacker mode: ' + str(attacker_mode))
exit(10)
setups = []
for use_gradient_descent, name in modes:
print(f'Running sub-experiments for the {name} attacker setup.')
folder = f'{base_dir}/{name}'
os.mkdir(folder)
print(f'Results will be stored in {folder}.')
conf.model_conf.attacker_conf.use_gradient_descent = use_gradient_descent
setup = exec_new_setup(conf, folder,
experiment_conf['experiments_per_setup'], name)
setups.append(setup)
done_experiment = Experiment(setups)
print('Experiment done.')
# Save the result data
......@@ -142,5 +195,4 @@ if __name__ == "__main__":
sys.stderr = log
sys.stdout = log
main(experiment_conf, base_dir)
# main(experiment_conf, base_dir)
......@@ -29,7 +29,7 @@ class FakePlotter:
"""
Fake plotter that does nothing; used when num_features != 2
"""
def plot_iteration(self):
def plot_iteration(self, *args):
pass