Commit 86c29f70 authored by Martin Řepa

Added experiments setup

parent e26082f6
materials
app.egg-info
.idea
report
results
src/data/raw
images
venv/*
\ No newline at end of file
@@ -11,3 +11,5 @@ report/*
!report/*.tex
!report/*.bib
!report/*.pdf
images
\ No newline at end of file
ARG IMAGE=python3.7-cuda:latest
FROM ${IMAGE} as common
ENV APP_DIR /app
WORKDIR ${APP_DIR}
ARG DEVICE=gpu
ENV device ${DEVICE}
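# `device` is read back at runtime via os.environ.get('device', 'cuda') to pick the torch device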
# precompile wheels ----------------------------------------------------
FROM common AS install
COPY --from=wheelcache-bp:latest /wheels /wheels/
WORKDIR /src
COPY requirements.txt /src/
RUN pip3.7 install wheel
RUN pip3.7 wheel -r requirements.txt --find-links=/wheels --wheel-dir=/wheels
RUN pip3.7 install -r requirements.txt --find-links=/wheels
# store wheels ---------------------------------------------------------
FROM scratch AS wheelcache
COPY --from=install /wheels /wheels/
# image ----------------------------------------------------------------
FROM common AS image
COPY --from=install /usr/local /usr/local
RUN mkdir -p /home/ignac/experiments/learning_epochs \
/home/ignac/experiments/lp_blocking \
/home/ignac/experiments/lp_latency \
/home/ignac/experiments/game_theory_model_latency \
/home/ignac/experiments/game_theory_model_blocking
COPY src/ ${APP_DIR}/src/
COPY setup.py start_experiments.sh ${APP_DIR}/
CMD ["./start_experiments.sh"]
WHEELCACHE=wheelcache-bp:latest
cpu-image:
docker build --build-arg IMAGE=python:3.7-slim --build-arg DEVICE=cpu --target wheelcache -t $(WHEELCACHE) .
docker build --build-arg IMAGE=python:3.7-slim --build-arg DEVICE=cpu --target image -t experiments-cpu:latest .
gpu-image:
docker build --target wheelcache -t $(WHEELCACHE) .
	docker build --target image -t experiments-gpu:latest .
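# Usage sketch (run flags are illustrative, not part of this Makefile):
#   make cpu-image && docker run --rm -e device=cpu experiments-cpu:latest
#   make gpu-image && docker run --rm --gpus all experiments-gpu:latest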
@@ -15,6 +15,7 @@ sklearn = "*"
tensorflow = "*"
matplotlib = "*"
torch = "*"
pyyaml = "*"
[requires]
python_version = "3.7"
{
"_meta": {
"hash": {
"sha256": "45798a30df6e2c3a6012896cf83a70b58a21f44ca2a3ee42af00d15432d1917a"
"sha256": "4e3097a6d69d42c2ab229045c4c3ae51a48bda453eee9cf07589971c7feddfba"
},
"pipfile-spec": 6,
"requires": {
@@ -177,10 +177,10 @@
},
"markdown": {
"hashes": [
"sha256:c00429bd503a47ec88d5e30a751e147dcb4c6889663cd3e2ba0afe858e009baa",
"sha256:d02e0f9b04c500cde6637c11ad7c72671f359b87b9fe924b2383649d8841db7c"
"sha256:fc4a6f69a656b8d858d7503bda633f4dd63c2d70cf80abdc6eafa64c4ae8c250",
"sha256:fe463ff51e679377e3624984c829022e2cfb3be5518726b06f608a07a3aad680"
],
"version": "==3.0.1"
"version": "==3.1"
},
"matplotlib": {
"hashes": [
@@ -272,26 +272,26 @@
},
"protobuf": {
"hashes": [
"sha256:03666634d038e35d90155756914bc3a6316e8bcc0d300f3ee539e586889436b9",
"sha256:049d5900e442d4cc0fd2afd146786b429151e2b29adebed28e6376026ab0ee0b",
"sha256:0eb9e62a48cc818b1719b5035042310c7e4f57b01f5283b32998c68c2f1c6a7c",
"sha256:255d10c2c9059964f6ebb5c900a830fc8a089731dda94a5cc873f673193d208b",
"sha256:358cc59e4e02a15d3725f204f2eb5777fc10595e2d9a9c4c8d82292f49af6d41",
"sha256:41f1b737d5f97f1e2af23d16fac6c0b8572f9c7ea73054f1258ca57f4f97cb80",
"sha256:6a5129576a2cf925cd100e06ead5f9ae4c86db70a854fb91cedb8d680112734a",
"sha256:80722b0d56dcb7ca8f75f99d8dadd7c7efd0d2265714d68f871ed437c32d82b3",
"sha256:88a960e949ec356f7016d84f8262dcff2b842fca5355b4c1be759f5c103b19b3",
"sha256:97872686223f47d95e914881cb0ca46e1bc622562600043da9edddcb54f2fe1e",
"sha256:a1df9d22433ab44b7c7e0bd33817134832ae8a8f3d93d9b9719fc032c5b20e96",
"sha256:ad385fbb9754023d17be14dd5aa67efff07f43c5df7f93118aef3c20e635ea19",
"sha256:b2d5ee7ba5c03b735c02e6ae75fd4ff8c831133e7ca078f2963408dc7beac428",
"sha256:c8c07cd8635d45b28ec53ee695e5ac8b0f9d9a4ae488a8d8ee168fe8fc75ba43",
"sha256:d44ebc9838b183e8237e7507885d52e8d08c48fdc953fd4a7ee3e56cb9d20977",
"sha256:dff97b0ee9256f0afdfc9eaa430736cdcdc18899d9a666658f161afd137cf93d",
"sha256:e47d248d614c68e4b029442de212bdd4f6ae02ae36821de319ae90314ea2578c",
"sha256:e650b521b429fed3d525428b1401a40051097a5a92c30076c91f36b31717e087"
],
"version": "==3.7.0"
"sha256:21e395d7959551e759d604940a115c51c6347d90a475c9baf471a1a86b5604a9",
"sha256:57e05e16955aee9e6a0389fcbd58d8289dd2420e47df1a1096b3a232c26eb2dd",
"sha256:67819e8e48a74c68d87f25cad9f40edfe2faf278cdba5ca73173211b9213b8c9",
"sha256:75da7d43a2c8a13b0bc7238ab3c8ae217cbfd5979d33b01e98e1f78defb2d060",
"sha256:78e08371e236f193ce947712c072542ff19d0043ab5318c2ea46bbc2aaebdca6",
"sha256:7ee5b595db5abb0096e8c4755e69c20dfad38b2d0bcc9bc7bafc652d2496b471",
"sha256:86260ecfe7a66c0e9d82d2c61f86a14aa974d340d159b829b26f35f710f615db",
"sha256:92c77db4bd33ea4ee5f15152a835273f2338a5246b2cbb84bab5d0d7f6e9ba94",
"sha256:9c7b90943e0e188394b4f068926a759e3b4f63738190d1ab3d500d53b9ce7614",
"sha256:a77f217ea50b2542bae5b318f7acee50d9fc8c95dd6d3656eaeff646f7cab5ee",
"sha256:ad589ed1d1f83db22df867b10e01fe445516a5a4d7cfa37fe3590a5f6cfc508b",
"sha256:b06a794901bf573f4b2af87e6139e5cd36ac7c91ac85d7ae3fe5b5f6fc317513",
"sha256:bd8592cc5f8b4371d0bad92543370d4658dc41a5ccaaf105597eb5524c616291",
"sha256:be48e5a6248a928ec43adf2bea037073e5da692c0b3c10b34f9904793bd63138",
"sha256:cc5eb13f5ccc4b1b642cc147c2cdd121a34278b341c7a4d79e91182fff425836",
"sha256:cd3b0e0ad69b74ee55e7c321f52a98effed2b4f4cc9a10f3683d869de00590d5",
"sha256:d6e88c4920660aa75c0c2c4b53407aef5efd9a6e0ca7d2fc84d79aba2ccbda3a",
"sha256:ec3c49b6d247152e19110c3a53d9bb4cf917747882017f70796460728b02722e"
],
"version": "==3.7.1"
},
"pulp": {
"hashes": [
@@ -302,10 +302,10 @@
},
"pyparsing": {
"hashes": [
"sha256:66c9268862641abcac4a96ba74506e594c884e3f57690a696d21ad8210ed667a",
"sha256:f6c5ef0d7480ad048c054c37632c67fca55299990fff127850181659eea33fc3"
"sha256:1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a",
"sha256:9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03"
],
"version": "==2.3.1"
"version": "==2.4.0"
},
"python-dateutil": {
"hashes": [
@@ -316,10 +316,27 @@
},
"pytz": {
"hashes": [
"sha256:32b0891edff07e28efe91284ed9c31e123d84bea3fd98e1f72be2508f43ef8d9",
"sha256:d5f05e487007e29e03409f9398d074e158d920d36eb82eaf66fb1136b0c5374c"
"sha256:303879e36b721603cc54604edcac9d20401bdbe31e1e4fdee5b9f98d5d31dfda",
"sha256:d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141"
],
"version": "==2018.9"
"version": "==2019.1"
},
"pyyaml": {
"hashes": [
"sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c",
"sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95",
"sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2",
"sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4",
"sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad",
"sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba",
"sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1",
"sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e",
"sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673",
"sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13",
"sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19"
],
"index": "pypi",
"version": "==5.1"
},
"scikit-learn": {
"hashes": [
@@ -458,10 +475,10 @@
},
"werkzeug": {
"hashes": [
"sha256:c3fd7a7d41976d9f44db327260e263132466836cef6f91512889ed60ad26557c",
"sha256:d5da73735293558eb1651ee2fddc4d0dedcfa06538b8813a2e20011583c9e49b"
"sha256:0a73e8bb2ff2feecfc5d56e6f458f5b99290ef34f565ffb2665801ff7de6af7a",
"sha256:7fad9770a8778f9576693f0cc29c7dcc36964df916b83734f4431c0e612a7fbc"
],
"version": "==0.14.1"
"version": "==0.15.2"
},
"wheel": {
"hashes": [
......
-i https://pypi.org/simple
absl-py==0.7.1
astor==0.7.1
attr==0.3.1
attrs==19.1.0
cycler==0.10.0
gast==0.2.2
grpcio==1.19.0
h5py==2.9.0
kiwisolver==1.0.1
markdown==3.1
matplotlib==3.0.3
mock==2.0.0
numpy==1.16.2
pandas==0.24.2
pbr==5.1.3
protobuf==3.7.1
pulp==1.6.9
pyparsing==2.4.0
python-dateutil==2.8.0
pytz==2019.1
pyyaml==5.1
scikit-learn==0.20.3
scipy==1.2.1
six==1.12.0
sklearn==0.0
termcolor==1.1.0
torch==1.0.1.post2
werkzeug==0.15.2
wheel==0.33.1 ; python_version >= '3'
import itertools
import logging
import operator
import os
from typing import List
import numpy as np
import torch
-from config import ModelConfig
+from src.config import ModelConfig
logger = logging.getLogger(__name__)
-DEVICE = torch.device('cuda')
+DEVICE = torch.device(os.environ.get('device', 'cuda'))
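# 'device' is injected by the Dockerfile (ENV device ${DEVICE}); falls back to CUDA when unset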
class Attacker:
def __init__(self, model_conf: ModelConfig):
@@ -29,12 +31,15 @@ class Attacker:
# return self.random_action()
def get_best_response(self, def_actions: List, def_probs: List):
logger.info('Attacker searching for response')
# Keep only the defender actions that are played with non-zero probability
non_zero_p = np.where(np.asarray(def_probs) != 0)
actions = np.asarray(def_actions)[non_zero_p]
probs = np.asarray(def_probs)[non_zero_p]
-return self._get_best_response(actions, probs)
+br = self._get_best_response(actions, probs)
+logger.info(f'Attacker found BR {br}')
+return br
def does_br_exists(self, played_actions_p1, br_p1, value):
it_does = self._does_br_exists(played_actions_p1, br_p1, value)
......
from typing import List
-from config import ModelConfig
-from data.loader import np_matrix_from_scored_csv
-from neural_networks.network import NeuralNetwork, FormattedData, BenignData
+from src.config import ModelConfig
+from src.data.loader import np_matrix_from_scored_csv
+from src.neural_networks.network import NeuralNetwork, FormattedData, BenignData
import numpy as np
import logging
@@ -28,6 +28,7 @@ class Defender:
self.benign_data = prepare_benign_data(raw_x)
def get_best_response(self, att_actions: List, att_probs: List) -> NeuralNetwork:
logger.info('Defender searching for response')
# Keep only the attacker actions that are played with non-zero probability
non_zero_p = np.where(np.asarray(att_probs) != 0)
@@ -46,17 +47,7 @@
if new_nn.final_loss < best_nn.final_loss:
best_nn = new_nn
-# If my response is to block nothing it might be hard to train so
-# I try 10 more nns to train # TODO check if this does not get stuck
-# attacker_goal = np.sum(np.prod(attack.unique_x, axis=1)*attack.probs_x)
-# tries = 0
-# while abs(best_nn.final_loss - attacker_goal) < 1e-5 and tries < 10:
-# tries += 1
-# new_nn = self._train_nn(attack)
-# self._log_creation(new_nn, best_nn)
-# if new_nn.final_loss < best_nn.final_loss:
-# best_nn = new_nn
logger.info(f'Defender found new br neural network with id {best_nn.id}')
return best_nn
def _log_creation(self, new_nn, best_nn):
......
@@ -2,7 +2,7 @@ from typing import Callable
import attr
-from utility import *
+from src.utility import *
@attr.s
@@ -27,7 +27,7 @@ class DefenderConfig:
# A new action is considered already played in the game if the difference
# between its utility and the Nash equilibrium value of the zero-sum game
# is less than this value
-defender_epsilon: float = attr.ib(default=1e-3)
+defender_epsilon: float = attr.ib(default=1e-2)
# This number of neural networks will be trained in each double oracle
# iteration and the best one will be considered as a best response
@@ -54,7 +54,7 @@ class AttackerConfig:
# A new action is considered already played in the game if the difference
# between its utility and the Nash equilibrium value of the zero-sum game
# is less than this value
-epsion: float = attr.ib(default=1e-3)
+epsion: float = attr.ib(default=1e-2)
# Number of random tries to find attacker action using gradient descent.
# The one with the best final loss value is chosen.
@@ -75,7 +75,7 @@ class AttackerConfig:
@attr.s
class ModelConfig:
# Use blocking or latency model?
-use_blocking_model: bool = attr.ib(default=False)
+use_blocking_model: bool = attr.ib(default=True)
# Number of features
features_count: int = attr.ib(default=2)
@@ -90,7 +90,7 @@ class ModelConfig:
i_a: int = attr.ib(default=1)
# i_d, used only for latency
-i_d: int = attr.ib(default=4)
+i_d: int = attr.ib(default=2)
# malicious : benign ratio in datasets
benign_ratio: int = attr.ib(default=10)
@@ -116,6 +116,15 @@ class ModelConfig:
self.defender_conf.nn_conf.loss_function = \
get_latency_nn_loss_function(self.i_a, self.i_d, self.benign_ratio)
+def set_ia_id_benign_ratio(self, i_a, i_d, benign_ratio):
+self.i_a = i_a
+self.i_d = i_d
+self.benign_ratio = benign_ratio
+self.__attrs_post_init__()
+def set_data_file(self, name: str):
+self.defender_conf.benign_data_file_name = name
@attr.s
class PlotterConfig:
......
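Taken together, the new setters let an experiment rebuild the derived loss function after changing parameters. A minimal sketch, assuming the attrs defaults shown above:

from src.config import ModelConfig

conf = ModelConfig()                      # blocking model is now the default
conf.set_ia_id_benign_ratio(1, 2, 10000)  # re-runs __attrs_post_init__, refreshing the loss function
conf.set_data_file('normal_distribution_experiments.csv')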
use_blocking_model: True
legacy_folder: /home/ignac/experiments/game_theory_model_blocking/legacy1
num_of_experiments: 2
nn_epochs: 2000
benign_ratio: 10000
features_num: 2
data_file: normal_distribution_experiments.csv
import os
import pickle
import time
from dataclasses import dataclass
from os.path import dirname
from pathlib import Path
from typing import List
import torch
import yaml
from src.config import RootConfig
from src.game import Game
CONFIG_FILE = 'configuration.yaml'
@dataclass
class AttackerAction:
action: List[float]
prob: float
@dataclass
class DefenderAction:
model_file_id: str
prob: float
loss: float
fp_part: float
@dataclass
class SubResult:
legacy_folder: str
time: float
zero_sum_game_value: float
almost_zero_attacker_value: float
almost_zero_defender_value: float
attacker_actions: List[AttackerAction]
defender_actions: List[DefenderAction]
@dataclass
class Experiment:
sub_results: List[SubResult]
def get_configuration() -> dict:
with open(Path(dirname(__file__)) / CONFIG_FILE, 'r', encoding='utf-8')\
as file:
content = file.read()
return yaml.load(content, Loader=yaml.FullLoader)
def get_root_conf(conf_of_conf: dict) -> RootConfig:
conf = RootConfig()
conf.debug = False
conf.plot_conf.plot_enabled = False
conf.model_conf.use_blocking_model = bool(conf_of_conf['use_blocking_model'])
conf.model_conf.set_ia_id_benign_ratio(.5, .5,  # placeholder values; i_a and i_d are unused by the blocking model
conf_of_conf['benign_ratio'])
conf.model_conf.set_data_file(conf_of_conf['data_file'])
conf.model_conf.defender_conf.nn_conf.epochs = conf_of_conf['nn_epochs']
return conf
if __name__ == "__main__":
print('Starting game theory blocking model experiment')
experiment_conf = get_configuration()
base_dir = experiment_conf["legacy_folder"]
if not os.path.exists(base_dir):
print(f'Creating base dir {base_dir}')
os.mkdir(base_dir)
conf = get_root_conf(experiment_conf)
repetitions = experiment_conf['num_of_experiments']
sub_results = []
for i in range(repetitions):
print(f'Starting iteration {i + 1}.')
os.mkdir(f'{base_dir}/{i}')
start = time.time()
result = Game(conf).solve_game()
time_taken = time.time() - start
attacker_actions = []
for p1_action, p1_prob in zip(result.ordered_actions_p1,
result.probs_p1):
if p1_prob == 0: continue
attacker_actions.append(AttackerAction(p1_action, p1_prob))
defender_actions = []
for p2_action, p2_prob in zip(result.ordered_actions_p2,
result.probs_p2):
if p2_prob == 0: continue
defender_actions.append(DefenderAction(p2_action.id,
p2_prob,
p2_action.final_loss,
p2_action.final_fp_cost))
torch.save(p2_action.model.state_dict(),
f'{base_dir}/{i}/{p2_action.id}.pt')
sub_result = SubResult(str(i),
time_taken,
result.zero_sum_nash_val,
result.attacker_value,
result.defender_value,
attacker_actions,
defender_actions)
sub_results.append(sub_result)
done_experiment = Experiment(sub_results)
print('Experiment done.')
# Save the result data
data_file = f'{base_dir}/data'
print(f'Saving result data to {data_file} file.')
with open(data_file, 'wb') as file:
pickle.dump(done_experiment, file)
print('File saved.\n')
# Save the model config just to be sure
model_config_file = f'{base_dir}/model_config'
print(f'Saving model config to {model_config_file} file.')
with open(model_config_file, 'wb') as file:
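# Null out the callable fields first: the utility and loss functions wired up in __attrs_post_init__ likely wouldn't survive pickling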
conf.__attrs_post_init__ = None
conf.model_conf.attacker_torch_utility = None
conf.model_conf.attacker_utility = None
conf.model_conf.defender_conf.nn_conf.loss_function = None
pickle.dump(conf, file)
print('File saved.')
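A short sketch for reading the results back (path taken from the YAML above). Note that pickle resolves AttackerAction, DefenderAction, SubResult and Experiment in __main__, so load from a script that defines these dataclasses:

import pickle

with open('/home/ignac/experiments/game_theory_model_blocking/legacy1/data', 'rb') as f:
    experiment = pickle.load(f)

for sub in experiment.sub_results:
    print(sub.legacy_folder, sub.time, sub.zero_sum_game_value)
    for action in sub.defender_actions:
        print('  ', action.model_file_id, action.prob, action.loss, action.fp_part)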
use_blocking_model: False
legacy_folder: /home/ignac/experiments/game_theory_model_latency/legacy1
num_of_experiments: 2
nn_epochs: 2000
i_a: 1
i_d: 2
benign_ratio: 10000
features_num: 2
data_file: normal_distribution_experiments.csv
import os
import pickle
import time
from dataclasses import dataclass
from os.path import dirname
from pathlib import Path
from typing import List
import torch
import yaml
from src.config import RootConfig
from src.game import Game
CONFIG_FILE = 'configuration.yaml'
@dataclass
class AttackerAction:
action: List[float]
prob: float
@dataclass
class DefenderAction:
model_file_id: str
prob: float
loss: float
fp_part: float
@dataclass
class SubResult:
legacy_folder: str
time: float
zero_sum_game_value: float
almost_zero_attacker_value: float
almost_zero_defender_value: float
attacker_actions: List[AttackerAction]
defender_actions: List[DefenderAction]
@dataclass
class Experiment:
sub_results: List[SubResult]
def get_configuration() -> dict:
with open(Path(dirname(__file__)) / CONFIG_FILE, 'r', encoding='utf-8')\
as file:
content = file.read()
return yaml.load(content, Loader=yaml.FullLoader)
def get_root_conf(conf_of_conf: dict) -> RootConfig:
conf = RootConfig()
conf.debug = False
conf.plot_conf.plot_enabled = False
conf.model_conf.use_blocking_model = bool(conf_of_conf['use_blocking_model'])
conf.model_conf.set_ia_id_benign_ratio(conf_of_conf['i_a'],
conf_of_conf['i_d'],
conf_of_conf['benign_ratio'])
conf.model_conf.set_data_file(conf_of_conf['data_file'])
conf.model_conf.defender_conf.nn_conf.epochs = conf_of_conf['nn_epochs']
return conf
if __name__ == "__main__":
print('Starting game theory latency model experiment')
experiment_conf = get_configuration()
base_dir = experiment_conf["legacy_folder"]
if not os.path.exists(base_dir):
print(f'Creating base dir {base_dir}')
os.mkdir(base_dir)
conf = get_root_conf(experiment_conf)
repetitions = experiment_conf['num_of_experiments']
sub_results = []
for i in range(repetitions):
print(f'Starting iteration {i + 1}.')
os.mkdir(f'{base_dir}/{i}')
start = time.time()
result = Game(conf).solve_game()
time_taken = time.time() - start
attacker_actions = []
for p1_action, p1_prob in zip(result.ordered_actions_p1,
result.probs_p1):
if p1_prob == 0: continue
attacker_actions.append(AttackerAction(p1_action, p1_prob))
defender_actions = []
for p2_action, p2_prob in zip(result.ordered_actions_p2,
result.probs_p2):
if p2_prob == 0: continue
defender_actions.append(DefenderAction(p2_action.id,
p2_prob,
p2_action.final_loss,
p2_action.final_fp_cost))
torch.save(p2_action.model.state_dict(),
f'{base_dir}/{i}/{p2_action.id}.pt')
sub_result = SubResult(str(i),
time_taken,
result.zero_sum_nash_val,
result.attacker_value,
result.defender_value,
attacker_actions,
defender_actions)
sub_results.append(sub_result)
done_experiment = Experiment(sub_results)
print('Experiment done.')
# Save the result data
data_file = f'{base_dir}/data'
print(f'Saving result data to {data_file} file.')
with open(data_file, 'wb') as file:
pickle.dump(done_experiment, file)
print('File saved.\n')
# Save the model config just to be sure
model_config_file = f'{base_dir}/model_config'
print(f'Saving model config to {model_config_file} file.')
with open(model_config_file, 'wb') as file:
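# Null out the callable fields first: the utility and loss functions wired up in __attrs_post_init__ likely wouldn't survive pickling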
conf.__attrs_post_init__ = None
conf.model_conf.attacker_torch_utility = None
conf.model_conf.attacker_utility = None
conf.model_conf.defender_conf.nn_conf.loss_function = None
pickle.dump(conf, file)
print('File saved.')
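The per-iteration .pt files hold only state_dicts, so restoring a defender network needs the original architecture. A hedged sketch; the no-argument constructor and the .model attribute of NeuralNetwork are assumptions based on the usage above, and <id> stands in for a real model id:

import torch

from src.neural_networks.network import NeuralNetwork

net = NeuralNetwork()  # adjust to the real constructor signature
state = torch.load('/home/ignac/experiments/game_theory_model_latency/legacy1/0/<id>.pt',
                   map_location='cpu')
net.model.load_state_dict(state)
net.model.eval()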
experiments_per_setup: 1 # 6
legacy_folder: /home/ignac/experiments/learning_epochs/legacy1
epochs:
lower_bound: 2000
number_of_steps: 2 # 6-8 should be enough
upper_bound: 5000 # 30000
conf:
i_a: 1
i_d: 2
benign_ratio: 10000
data_file: normal_distribution_experiments.csv
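The epochs block presumably expands into a sweep of training lengths between the two bounds. One plausible reading, using numpy's evenly spaced values (endpoints included):

import numpy as np

epochs_conf = {'lower_bound': 2000, 'upper_bound': 5000, 'number_of_steps': 2}
epoch_values = np.linspace(epochs_conf['lower_bound'],
                           epochs_conf['upper_bound'],
                           epochs_conf['number_of_steps'], dtype=int)
print(epoch_values)  # -> [2000 5000]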
import os
import pickle
import time
from os.path import dirname
from pathlib import Path
from typing import List
import torch
import yaml
from dataclasses import dataclass
from src.config import RootConfig
from src.game import Game
CONFIG_FILE = 'configuration.yaml'
@dataclass
class AttackerAction:
action: List[float]
prob: float
@dataclass
class DefenderAction:
model_file_id: str
prob: float
loss: float
fp_part: float
@dataclass
class SubResult: