Commit f0a0f81b authored by Martin Řepa's avatar Martin Řepa

Prepare learning epochs 2000 and 22000 with plot option

parent 5b9eef90
......@@ -75,7 +75,7 @@ class AttackerConfig:
@attr.s
class ModelConfig:
# Use blocking or latency model?
use_blocking_model: bool = attr.ib(default=True)
use_blocking_model: bool = attr.ib(default=False)
# Number of features
features_count: int = attr.ib(default=2)
......@@ -131,6 +131,11 @@ class PlotterConfig:
# Enable/disable plot rendering
plot_enabled: bool = attr.ib(default=True)
learning_epochs_plotter = attr.ib(default=False)
# Directory where per-iteration SVG plots are written
output_svg_dir: str = attr.ib(default='/home/ignac/experiments/test')
@attr.s
class RootConfig:
......
experiments_per_setup: 6 # 6
legacy_folder: /home/ignac/experiments/learning_epochs/2000to30000by4000_6times_bignn_synteticdata # Change in regards with configuration
experiments_per_setup: 2 # 6
legacy_folder: /home/ignac/experiments/learning_epochs/2000and22000_2times_bignn_gaussdata_plot # Change in regards with configuration
epochs:
lower_bound: 2000
number_of_steps: 8 # 8 (6-8 should be enough)
upper_bound: 30000 # 30000
number_of_steps: 2 # 8 (6-8 should be enough)
upper_bound: 22000 # 30000
conf:
i_a: 1
i_d: 3
benign_ratio: 5
data_file: syntetic_experiments.csv
data_file: normal_distribution_experiments.csv
plot: True
......@@ -69,6 +69,7 @@ def exec_new_setup(conf: RootConfig, folder: str, iterations: int) -> Setup:
if not os.path.exists(f'{folder}/{i}'):
os.mkdir(f'{folder}/{i}')
conf.plot_conf.output_svg_dir = f'{folder}/{i}'
print(f'Starting {i+1}. iteration of setup with {cur_epochs} epochs')
start = time.time()
......@@ -119,13 +120,16 @@ def exec_new_setup(conf: RootConfig, folder: str, iterations: int) -> Setup:
def get_root_conf(conf_of_conf: dict) -> RootConfig:
conf = RootConfig()
conf.debug = False
conf.plot_conf.plot_enabled = False
conf.model_conf.use_blocking_model = False
conf.model_conf.set_ia_id_benign_ration(conf_of_conf['i_a'],
conf_of_conf['i_d'],
conf_of_conf['benign_ratio'])
conf.model_conf.set_data_file(conf_of_conf['data_file'])
is_plotting = conf_of_conf['plot']
conf.plot_conf.plot_enabled = is_plotting
conf.plot_conf.learning_epochs_plotter = is_plotting
return conf
......
......@@ -9,7 +9,7 @@ from src.actors.attacker import DiscreteAttacker, GradientAttacker
from src.actors.defender import Defender
from src.config import RootConfig
from src.neural_networks.network import NeuralNetwork
from src.visual.plotter import BlockingPlotter, LatencyPlotter, FakePlotter
from src.visual.plotter import FakePlotter, LearningEpochsPlotter, DebugPlotter
logger = logging.getLogger(__name__)
......@@ -51,10 +51,12 @@ class GameSolver:
self.plotter = FakePlotter()
else:
discr_actions = self.attacker.create_discrete_actions()
if conf.model_conf.use_blocking_model:
self.plotter = BlockingPlotter(conf.plot_conf, discr_actions)
if conf.plot_conf.learning_epochs_plotter:
self.plotter = LearningEpochsPlotter(conf.plot_conf, discr_actions,
conf.model_conf.defender_conf.benign_data_file_name)
else:
self.plotter = LatencyPlotter(conf.plot_conf, discr_actions)
self.plotter = DebugPlotter(conf.plot_conf, discr_actions,
conf.model_conf.defender_conf.benign_data_file_name)
def double_oracle(self) -> Result:
# Get initial actions as the first ones
......
......@@ -33,29 +33,72 @@ class FakePlotter:
pass
class Plotter:
def __init__(self, conf: PlotterConfig, discr_actions):
class LearningEpochsPlotter:
    """Saves one standalone SVG figure per double-oracle iteration.

    Each figure overlays the benign data-set, the defender's best-response
    heat-map, and the attacker's current mixed (nash) strategy, and is
    written to ``conf.output_svg_dir``.
    """

    def __init__(self, conf: PlotterConfig, discr_actions, data_filename):
        self.conf = conf
        # When plotting is disabled, skip all (potentially expensive) setup;
        # plot_iteration() guards on the same flag below.
        if not self.conf.plot_enabled:
            return
        # Discretized attacker actions moved to the compute device once,
        # reused for every best-response prediction.
        self.actions = torch.tensor(discr_actions).float().to(DEVICE)
        # Load the benign data points; deduplicate rows for scatter plotting.
        x, _ = np_matrix_from_scored_csv(data_filename, 0)
        self.dataset = np.unique(x, axis=0)

    def plot_iteration(self, iteration, zero_sum_val, p1_br_value, p2_br_value,
                       played_p2, probs_p2, played_p1, probs_p1, br_p1, br_p2):
        """Render and save the figure for one iteration.

        Only ``iteration``, ``played_p1``/``probs_p1`` (attacker nash
        strategy) and ``br_p2`` (defender best response) are used here; the
        remaining arguments keep the signature compatible with the other
        plotters' plot-iteration interface.
        """
        if not self.conf.plot_enabled:
            return
        fig, ax = plt.subplots()
        # Feature space is plotted on the unit square.
        ax.set_xlim(0., 1.)
        ax.set_ylim(0., 1.)
        plt.tight_layout()
        # Plot benign data-set --- TMP ---
        # assumes dataset columns 0 and 1 are the two plotted features
        ax.scatter(self.dataset[:, 0], self.dataset[:, 1], c='blue', s=1.)
        # ---------------
        # Plot defender's best response as a heat-map over the action grid.
        # NOTE(review): the reshape presumes discr_actions is a 101x101 grid
        # laid out column-major (order='F') — confirm against the action
        # discretization in the attacker code.
        res = br_p2.latency_predict(self.actions) \
            .cpu().numpy().reshape((101, 101), order='F')
        ax.imshow(res, cmap='Reds', vmin=0, vmax=1, origin='lower', extent=(0., 1., 0., 1.))
        # Plot attacker nash strategy: one annotated marker per action with
        # non-zero probability.
        for point, prob in zip(played_p1, probs_p1):
            if prob == 0:
                continue
            ax.scatter(point[0], point[1], c='green', marker='^')
            ax.annotate(f'{round(prob, 2)}', (point[0], point[1]), color='green')
        fig.savefig(f'{self.conf.output_svg_dir}/iteration{iteration}.svg',
                    format='svg', bbox_inches='tight')
        # Close the figure to free matplotlib resources across many iterations.
        plt.close(fig)
class DebugPlotter:
def __init__(self, conf: PlotterConfig, discr_actions, data_filename):
self.conf = conf
if not self.conf.plot_enabled:
return
self.plotted = []
self._init_plots(discr_actions)
self._init_plots(discr_actions, data_filename)
# Variables to plot value of the game convergence
self.last_zero_sum_value = None
self.last_zero_sum_att_value = None
self.last_zero_sum_def_value = None
def _init_plots(self, discr_actions):
def _init_plots(self, discr_actions, data_filename):
plt.ion()
self.fig, self.ax = plt.subplots(2, 3)
self.actions = torch.tensor(discr_actions).float().to(DEVICE)
self.plotted = []
# TMP-----------
x, _ = np_matrix_from_scored_csv('normal_distribution_experiments.csv', 0)
x, _ = np_matrix_from_scored_csv(data_filename, 0)
x = np.unique(x, axis=0) * 100
self.ax[0][0].scatter(x[:, 0], x[:, 1], c='blue', s=1.)
# ---------------
......@@ -95,69 +138,6 @@ class Plotter:
self.fig.canvas.draw()
plt.pause(0.000001)
def _plot_iteration(self, *args):
raise NotImplemented()
class LatencyPlotter(Plotter):
def __init__(self, conf: PlotterConfig, discr_actions):
super().__init__(conf, discr_actions)
def _plot_iteration(self, iteration, zero_sum_val, p1_br_value, p2_br_value,
played_p2, probs_p2, played_p1, probs_p1, br_p1, br_p2):
# Set title of current figure
self.fig.suptitle(f'Iteration: {iteration}, value: {zero_sum_val}')
# Plot heat-map of defender's nash strategy actions
res = np.zeros((101, 101))
for nn, prob in zip(played_p2, probs_p2):
if prob == 0: continue
predictions = nn.latency_predict(self.actions).cpu().numpy()
res += (predictions * prob).reshape((101, 101), order='F')
self.plotted.append(self.ax[0][0].imshow(res, cmap='Reds', vmin=0,
vmax=1, origin='lower',
interpolation='spline16'))
# Plot attacker nash strategy
for point, prob in zip(played_p1, probs_p1):
if prob == 0:
continue
self.plotted.append(self.ax[0][1].scatter(point[0], point[1],
c='red', marker='^'))
self.plotted.append(self.ax[0][1].annotate(f'{round(prob, 2)}',
(point[0], point[1])))
# Add attacker new action to subplot with all his actions
self.ax[0][2].scatter(br_p1[0], br_p1[1], c='blue', marker='^')
# Plot heat-map of defender's best response
res = br_p2.latency_predict(self.actions).cpu().numpy().reshape((101, 101), order='F')
self.plotted.append(self.ax[1][0].imshow(res, cmap='Reds', vmin=0,
vmax=1, origin='lower',
interpolation='spline16'))
# Plot attacker best response
self.plotted.append(self.ax[1][1].scatter(br_p1[0], br_p1[1], c='red'))
# Plot game value convergence
if self.last_zero_sum_value is not None:
x_vals = [iteration - 1, iteration]
self.ax[1][2].plot(x_vals, [self.last_zero_sum_value, zero_sum_val],
c='blue')
self.ax[1][2].plot(x_vals, [self.last_zero_sum_att_value, p1_br_value],
c='red')
self.ax[1][2].plot(x_vals, [self.last_zero_sum_def_value, p2_br_value],
c='green')
self.last_zero_sum_value = zero_sum_val
self.last_zero_sum_att_value = p1_br_value
self.last_zero_sum_def_value = p2_br_value
# TODO one plotter is maybe enough
class BlockingPlotter(Plotter):
def __init__(self, conf: PlotterConfig, discr_actions):
super().__init__(conf, discr_actions)
def _plot_iteration(self, iteration, zero_sum_val, p1_br_value, p2_br_value,
played_p2, probs_p2, played_p1, probs_p1, br_p1, br_p2):
# Set title of current figure
......@@ -188,7 +168,6 @@ class BlockingPlotter(Plotter):
res = br_p2.latency_predict(self.actions).cpu().numpy().reshape((101, 101), order='F')
self.plotted.append(self.ax[1][0].imshow(res, cmap='Reds', vmin=0,
vmax=1, origin='lower'))
# Plot attacker best response
self.plotted.append(self.ax[1][1].scatter(br_p1[0], br_p1[1], c='red'))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment