Commit 78bdbab5 authored by Martin Řepa's avatar Martin Řepa

backup

parent 58614315
......@@ -14,6 +14,7 @@ pandas = "*"
sklearn = "*"
tensorflow = "*"
matplotlib = "*"
torch = "*"
[requires]
python_version = "3.6"
No preview for this file type
......@@ -2,13 +2,13 @@ from typing import Callable
import attr
from utility import base_utility
from utility import rate_limit_utility
@attr.s
class NeuralNetworkConfig:
# Number of epochs in a neural network training phase
epochs: int = attr.ib(default=40)
epochs: int = attr.ib(default=1000)
# String with loss_function definition.
# List of available functions: https://keras.io/losses/
......@@ -18,24 +18,26 @@ class NeuralNetworkConfig:
# List of available optimizers: https://keras.io/optimizers/
optimizer: str = attr.ib(default='adam')
# From docs:
# Value used for weighting the loss function (during training only) for
# malicious requests. This can be useful to tell the model to "pay more
# attention" to malicious samples.
# Setting it to 1 makes loss function behave equally for both predictions
# during training
fp_weight: int = attr.ib(default=5)
fp_weight: int = attr.ib(default=1)
@attr.s
class TrainingNnConfig:
# Name of .csv file in src/data/scored directory with scored data which will
# be used as benign data in neural network training phase
benign_data_file_name: str = attr.ib(default='all_benign_scored.csv')
benign_data_file_name: str = attr.ib(default='test.csv') #all_benign_scored.csv
# Number of benign records to be used
benign_data_count: int = attr.ib(default=1000)
# Number \in [0-1] representing fraction of data used as validation dataset
validation_split: float = attr.ib(default=0.1)
# Specifying number of fake malicious DNS records created each
# iteration of double oracle algorithm from attacker's actions used in
# neural network training phase
......@@ -62,11 +64,11 @@ class BaseConfig:
# Sum(probability of each action times its fp_rate) must be less than this
# number. Fp_rate of the action is total number of malicious prediction for
# given benign data set
false_positives_allowed: int = attr.ib(default=10)
false_positives_allowed: int = attr.ib(default=0.5)
# Function to calculate utility given the actions
# f: List[float], NeuralNetwork -> float
utility_function: Callable = attr.ib(default=base_utility)
utility_function: Callable = attr.ib(default=rate_limit_utility)
@attr.s
......
......@@ -27,14 +27,14 @@ def np_arrays_from_scored_csv(file_name: str, label: int,
record.append(float(item))
batch.append(record)
labels.append(label)
labels.append([label])
if len(batch) == count_max:
break
return np.array(batch, np.float), np.array(labels, np.int8)
return np.array(batch, np.float), np.array(labels, np.uint8)
if __name__ == "__main__":
a = np_arrays_from_scored_csv(Path('scored/scored_malicious.csv'), 1, 100)
a = np_arrays_from_scored_csv('all_benign_scored.csv', 0, 1000)
print(a)
initial0,initial1
0.036923076923076927,0.8166666666666665
0.05256410256410256,0.64
0.1382051282051282,0.56
0.15384615384615385,0.3833333333333333
0.12948717948717947,0.41142857142857137
0.21512820512820513,0.34500000000000003
0.24076923076923076,0.2688888888888889
0.2364102564102564,0.22
0.3120512820512821,0.23636363636363636
0.33769230769230774,0.16666666666666663
0.3233333333333333,0.22
0.378974358974359,0.1857142857142857
0.43461538461538457,0.16333333333333336
0.3802564102564102,0.1325
0.4358974358974359,0.16294117647058826
0.4115384615384615,0.13444444444444445
0.5271794871794871,0.18684210526315792
0.5328205128205128,0.13
0.5484615384615384,0.10380952380952381
0.5941025641025641,0.15818181818181817
0.6197435897435898,0.06304347826086956
0.6353846153846154,0.07833333333333332
0.631025641025641,0.134
0.7066666666666667,0.12000000000000001
0.6823076923076923,0.046296296296296294
0.7079487179487179,0.11285714285714285
0.7635897435897436,0.0696551724137931
0.8192307692307692,0.07666666666666669
0.8448717948717949,0.11387096774193549
0.8505128205128205,0.08125
0.8161538461538461,0.05878787878787879
0.9017948717948718,0.06647058823529413
0.8874358974358973,0.024285714285714285
0.923076923076923,0.09222222222222223
0.9087179487179486,0.02027027027027027
1,0.03842105263157895
0.98,0.09666666666666666
0.05692307692307692,0.8766666666666666
0.06256410256410255,0.62
0.17820512820512818,0.48000000000000004
0.16384615384615386,0.46333333333333326
0.1794871794871795,0.3214285714285714
0.2251282051282051,0.29500000000000004
0.27076923076923076,0.2488888888888889
0.24641025641025638,0.24000000000000002
0.28205128205128205,0.21636363636363637
0.3476923076923077,0.22666666666666666
0.35333333333333333,0.24000000000000002
0.328974358974359,0.1857142857142857
0.3846153846153846,0.19333333333333336
0.37025641025641026,0.17250000000000001
0.4058974358974359,0.16294117647058826
0.4915384615384615,0.09444444444444446
0.4971794871794872,0.1668421052631579
0.5128205128205128,0.16
0.5384615384615384,0.0838095238095238
0.5241025641025641,0.12818181818181817
0.5497435897435897,0.07304347826086957
0.5853846153846154,0.12833333333333333
0.631025641025641,0.15400000000000003
0.6966666666666667,0.14
0.7123076923076923,0.1362962962962963
0.757948717948718,0.052857142857142846
0.7435897435897436,0.0896551724137931
0.7992307692307692,0.07666666666666669
0.7748717948717948,0.09387096774193548
0.7705128205128204,0.04125
0.8461538461538461,0.05878787878787879
0.8217948717948718,0.046470588235294125
0.9274358974358974,0.04428571428571429
0.903076923076923,0.03222222222222223
0.9487179487179487,0.10027027027027027
0.9343589743589743,0.09842105263157895
1.0,0.04666666666666666
0.06692307692307693,0.8966666666666666
0.10256410256410256,0.67
0.17820512820512818,0.56
0.18384615384615385,0.4233333333333333
0.2094871794871795,0.3714285714285714
0.21512820512820513,0.34500000000000003
0.19076923076923075,0.3388888888888889
0.2264102564102564,0.31
0.24205128205128204,0.27636363636363637
0.3476923076923077,0.25666666666666665
0.3233333333333333,0.19
0.318974358974359,0.1557142857142857
0.3846153846153846,0.14333333333333337
0.43025641025641026,0.1625
0.4358974358974359,0.10294117647058824
0.4515384615384615,0.09444444444444446
0.48717948717948717,0.1568421052631579
0.48282051282051275,0.17
0.5584615384615385,0.1538095238095238
0.5241025641025641,0.08818181818181818
0.5997435897435898,0.09304347826086956
0.6253846153846154,0.11833333333333332
0.691025641025641,0.14400000000000002
0.6466666666666666,0.08
0.6423076923076922,0.056296296296296296
0.6879487179487179,0.14285714285714285
0.7835897435897436,0.07965517241379311
0.8192307692307692,0.08666666666666668
0.8048717948717948,0.1338709677419355
0.8405128205128205,0.06125
0.8361538461538461,0.1087878787878788
0.8817948717948718,0.07647058823529412
0.8874358974358973,0.09428571428571429
0.963076923076923,0.08222222222222222
0.9987179487179487,0.11027027027027028
0.9743589743589743,0.03842105263157895
0.95,0.07666666666666666
0.08692307692307692,0.8666666666666666
0.12256410256410256,0.7000000000000001
0.17820512820512818,0.5700000000000001
0.12384615384615386,0.46333333333333326
0.16948717948717948,0.3714285714285714
0.21512820512820513,0.375
0.26076923076923075,0.3188888888888889
0.2264102564102564,0.31
0.23205128205128206,0.25636363636363635
0.3276923076923077,0.24666666666666665
0.3233333333333333,0.17
0.38897435897435895,0.2157142857142857
0.4046153846153846,0.18333333333333338
0.43025641025641026,0.1825
0.4058974358974359,0.10294117647058824
0.4515384615384615,0.16444444444444445
0.4571794871794872,0.0868421052631579
0.5028205128205128,0.18
0.5784615384615385,0.11380952380952382
0.5741025641025641,0.1481818181818182
0.6097435897435898,0.09304347826086956
0.5853846153846154,0.08833333333333332
0.671025641025641,0.12400000000000001
0.6466666666666666,0.060000000000000005
0.7323076923076923,0.10629629629629629
0.747948717948718,0.12285714285714285
0.7035897435897436,0.0596551724137931
0.8192307692307692,0.04666666666666668
0.7648717948717948,0.08387096774193549
0.7705128205128204,0.051250000000000004
0.8861538461538462,0.1087878787878788
0.8317948717948718,0.02647058823529412
0.9474358974358974,0.04428571428571429
0.943076923076923,0.11222222222222222
0.9087179487179486,0.06027027027027027
0.9343589743589743,0.02842105263157895
0.96,0.09666666666666666
......@@ -105,12 +105,10 @@ class Synthesizer:
if __name__ == "__main__":
synt = Synthesizer(2)
synt.add_cluster_around_2Dfunc(lambda x: 0.2, 0.05)
synt.add_cluster_around_2Dfunc(lambda x: 0.8, 0.05)
synt.add_cluster(Cluster([0.2, 0.2], 0.15, 200))
synt.add_cluster(Cluster([0.2, 0.8], 0.15, 200))
synt.add_cluster(Cluster([0.8, 0.2], 0.15, 200))
synt.add_cluster(Cluster([0.8, 0.8], 0.15, 200))
synt.add_cluster_around_2Dfunc(lambda x: 1/(15*x), 0.05)
synt.add_cluster_around_2Dfunc(lambda x: 1 / (15 * x), 0.05)
synt.add_cluster_around_2Dfunc(lambda x: 1 / (15 * x), 0.05)
synt.add_cluster_around_2Dfunc(lambda x: 1 / (15 * x), 0.05)
synt.generate()
synt.plot2D()
synt.save_to_file(Path('scored/test.csv'))
......@@ -25,6 +25,7 @@ class Game:
def _create_attacker_actions(self):
one_axis = np.linspace(0, 1, 101) # [0.00, 0.01, 0.02, ..., 0.99, 1.00]
# one_axis = np.linspace(0, 1, 11) # [0.0, 0.1, 0.2, ..., 0.9, 1.0]
axes = self._conf.base_conf.features_count - 1
return list(itertools.product(one_axis, *itertools.repeat(one_axis, axes)))
......
This diff is collapsed.
from typing import List, Tuple
from tensorflow import keras
import numpy as np
from sklearn.utils import shuffle
from config import NeuralNetworkConfig, TrainingNnConfig
from neural_networks.network import OrderCounter
def tmp_loss_function(y_true, y_pred):
    """Scaled mean-squared-error loss: mean of 100 * (y_pred - y_true)^2.

    The factor 100 magnifies gradients relative to plain MSE; the mean is
    taken over the last axis, matching keras' per-sample loss convention.
    """
    backend = keras.backend
    squared_error = backend.square(y_pred - y_true)
    return backend.mean(100 * squared_error, axis=-1)
class KerasNeuralNetwork:
    """Small feed-forward sigmoid classifier wrapped around a keras model.

    The model is trained on benign samples mixed with the attacker's
    malicious feature vectors (label 1).  Raw predictions lie in [0, 1]
    and are mapped to a rate limit via a 0.5 threshold.
    """

    def __init__(self, input_features=2,
                 nn_conf: NeuralNetworkConfig = NeuralNetworkConfig(),
                 nn_train_conf: TrainingNnConfig = TrainingNnConfig()):
        layer_stack = [
            keras.layers.Dense(10, activation='relu',
                               input_shape=(input_features,)),
            keras.layers.Dense(12, activation='relu'),
            keras.layers.Dense(1, activation='sigmoid'),
        ]
        self.model = keras.Sequential(layer_stack)
        # nn_conf.loss_function is deliberately not used here; the module's
        # scaled-MSE loss is applied instead.
        self.model.compile(loss=tmp_loss_function,
                           optimizer=nn_conf.optimizer,
                           metrics=['accuracy'])
        # Cached FP measure, filled in by calc_n0_false_positives().
        self.false_positives = None
        self.epochs = nn_conf.epochs
        self.fp_weight = nn_conf.fp_weight
        self.validation_split = nn_train_conf.validation_split
        # Monotonically increasing id used only for display in __str__.
        self.order = OrderCounter.next()

    def train(self,
              attacker_features_x: List[List[float]],
              benign_data: Tuple[np.ndarray, np.ndarray]):
        """Fit on benign data plus the attacker's actions labelled 1."""
        features, labels = benign_data
        malicious = np.array(attacker_features_x)
        # Extend the dataset only when the attacker actions are non-empty
        if len(malicious[0]):
            malicious_labels = [1] * len(malicious)
            features = np.concatenate((features, malicious), axis=0)
            labels = np.concatenate((labels, malicious_labels), axis=0)
        # Fixed seed keeps the shuffle reproducible across runs
        features, labels = shuffle(features, labels, random_state=1)
        self.model.fit(features, labels,
                       validation_split=self.validation_split,
                       epochs=self.epochs,
                       class_weight={0: 1, 1: self.fp_weight})

    def calc_n0_false_positives(self, x_test: np.ndarray):
        """Cache the FP surrogate: sum of fourth powers of predicted limits."""
        limits = self.predict_rate_limit(x_test)
        self.false_positives = sum(limit ** 4 for limit in limits)

    def predict(self, xs: np.ndarray):
        """Raw model outputs in [0, 1] for a batch of feature vectors."""
        return self.model.predict(xs)

    def predict_rate_limit(self, xs: np.ndarray):
        """Map each prediction p to 0 when p < 0.5, else rescale to (p-0.5)*2."""
        return [0 if p < 0.5 else (p - 0.5) * 2 for p in self.predict(xs)]

    def predict_solo(self, attacker_features: List[float]) -> int:
        """Predict a single feature vector; returns a number in [0, 1]."""
        batch = np.array([attacker_features])
        return self.model.predict(batch)[0][0]

    def predict_solo_rate_limit(self, attacker_features: List[float]) -> int:
        """Rate limit for a single feature vector (same threshold as batch)."""
        p = self.predict_solo(attacker_features)
        return 0 if p < 0.5 else (p - 0.5) * 2

    def get_false_positive_rate(self):
        """Return the value cached by calc_n0_false_positives (None if unset)."""
        return self.false_positives

    def __str__(self):
        return f'(Neural network {self.order} with FP n0: {self.false_positives})'
if __name__ == "__main__":
    # No standalone behaviour: this module is only used via KerasNeuralNetwork.
    pass
import logging
from pathlib import Path
from typing import List, Tuple
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tensorflow import keras
from torch import nn
from torch import optim
from config import NeuralNetworkConfig
from config import NeuralNetworkConfig, TrainingNnConfig, RootConfig
from src.data.loader import np_arrays_from_scored_csv
logger = logging.getLogger(__name__)
class OrderCounter:
order = 0
@staticmethod
def next():
OrderCounter.order += 1
return OrderCounter.order
class NeuralNetwork:
def __init__(self, input_features=2,
nn_conf: NeuralNetworkConfig=NeuralNetworkConfig()):
self.model = keras.Sequential([
keras.layers.Dense(10, activation='relu', input_shape=(input_features,)),
keras.layers.Dense(12, activation='relu'),
keras.layers.Dense(1, activation='sigmoid'),
]
nn_conf: NeuralNetworkConfig = NeuralNetworkConfig(),
nn_train_conf: TrainingNnConfig = TrainingNnConfig()):
self.model = nn.Sequential(
nn.Linear(input_features, 10),
nn.ReLU(),
nn.Linear(10, 12),
nn.ReLU(),
nn.Linear(12, 1),
nn.Sigmoid()
)
self.model.compile(loss=nn_conf.loss_function,
optimizer=nn_conf.optimizer,
metrics=['accuracy'])
self.false_positives = None
self.loss_fn = nn.BCELoss()
self.attacker_actions = None
self.epochs = nn_conf.epochs
self.fp_weight = nn_conf.fp_weight
self.validation_split = nn_train_conf.validation_split
def train(self,
attacker_features_x: List[List[float]],
benign_data: Tuple[np.ndarray, np.ndarray]):
self.order = OrderCounter.next()
def set_attacker_actions(self, attacker_actions: Tuple):
self.attacker_actions = attacker_actions
def loss_function(self):
pass
def _prepare_data(self, attacker_features_x: List[List[float]],
benign_data: Tuple[np.ndarray, np.ndarray]):
x, y = benign_data
# There are some attacker's features
# Add attacker's malicious actions to dataset
attacker_features_x = np.array(attacker_features_x)
if len(attacker_features_x[0]):
attacker_features_y = [1 for _ in attacker_features_x]
attacker_features_y = [[1] for _ in attacker_features_x]
x = np.concatenate((x, attacker_features_x), axis=0)
y = np.concatenate((y, attacker_features_y), axis=0)
# Shuffle benign and malicious data
x, y = shuffle(x, y, random_state=1)
self.model.fit(x, y,
epochs=self.epochs,
class_weight={0: 1, 1: self.fp_weight})
def calc_n0_false_positives(self, x_test: np.ndarray):
res = self.model.predict(x_test)
self.false_positives = sum(map(lambda x: 0 if x <= 0.5 else 1, res))
# Split data so we have train dataset and validation dataset
data = train_test_split(x, y, test_size=self.validation_split)
# Convert data to float() for pyTorch model compatibility
data = tuple(map(lambda a: torch.from_numpy(a).float(), data))
def predict(self, attacker_features: List[float]) -> int:
features = np.array([attacker_features])
prediction = self.model.predict(features)
# 1 -> malicious | 0 -> benign
return 0 if prediction[0][0] <= 0.5 else 1
# Return final data (x_train, x_validate, y_train, y_validate)
return data
def get_false_positive_rate(self):
return self.false_positives
def train(self,
attacker_features_x: List[List[float]],
benign_data: Tuple[np.ndarray, np.ndarray]):
data = self._prepare_data(attacker_features_x, benign_data)
x_train, x_validate, y_train, y_validate = data
self._train(x_train, y_train, x_validate, y_validate)
def __str__(self):
return f'(Neural network {self.__hash__()} with FP n0: {self.false_positives})'
def _train(self, x, y, x_validate, y_validate):
learning_rate = 1e-2
optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
x.requires_grad = True
for e in range(self.epochs):
logger.debug(f'Running epoch number {e}/{self.epochs}')
# Forward pass: compute predicted y by passing x to the model.
y_pred = self.model(x)
if __name__ == '__main__':
# Prepare the data
benign_x, benign_y = np_arrays_from_scored_csv(Path('../data/scored/all_benign_scored.csv'), 0, 100)
malicious_x, malicious_y = np_arrays_from_scored_csv(Path('../data/scored/scored_malicious.csv'), 1, 10)
# Compute and print loss.
loss = self.loss_fn(y_pred, y)
logger.debug(f'TestLoss: {loss.item()}, ValidateLoss: todo') # todo
# Compute validation loss and report some info
if e % 5 == 0:
with torch.no_grad():
y_validate_pred = self.model(x_validate)
validate_loss = self.loss_fn(y_validate_pred, y_validate)
logging.debug(f'Epoch: {e}/{self.epochs},\t'
f'TrainLoss: {loss.item()},\t'
f'ValidateLoss: {validate_loss},\t')
# Before the backward pass, use the optimizer object to zero all of
# the gradients for the variables it will update
optimizer.zero_grad()
x = np.concatenate((benign_x, malicious_x), axis=0)
y = np.concatenate((benign_y, malicious_y), axis=0)
x, y = shuffle(x, y, random_state=1)
# Backward pass: compute gradient of the loss with respect to model
# parameters
loss.backward()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.10, random_state=41)
# Calling the step function on an Optimizer makes an update to its
# parameters
optimizer.step()
# Initialize the model
network = NeuralNetwork()
network.model.fit(x_train, y_train, epochs=40)
def raw_predict(self, x):
with torch.no_grad():
tensor = torch.tensor(x).float()
res = self.model(tensor)
return res.numpy()
test_loss, test_acc = network.model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)
def limit_predict(self, x):
raw_prediction = self.raw_predict(x)
print(network.calc_n0_false_positives(x_test))
np_limit_func = np.vectorize(lambda p: 0 if p < 0.5 else (p - 0.5) * 2)
return np_limit_func(raw_prediction)
def setup_loger(conf):
log_format = ('%(asctime)-15s\t%(name)s:%(levelname)s\t'
'%(module)s:%(funcName)s:%(lineno)s\t%(message)s')
level = logging.DEBUG if conf.base_conf.debug else logging.INFO
logging.basicConfig(level=level, format=log_format)
if __name__ == '__main__':
setup_loger(RootConfig())
benign_x, benign_y = np_arrays_from_scored_csv(
Path('all_benign_scored.csv'), 0, 1000)
malicious_x, malicious_y = np_arrays_from_scored_csv(
Path('scored_malicious.csv'), 1, 0)
nn = NeuralNetwork()
nn.train(malicious_x, (benign_x, benign_y))
# test_loss, test_acc = network.model.evaluate(x_test, y_test)
# print('Test loss:', test_loss)
# print('Test accuracy:', test_acc)
#
# network.calc_n0_false_positives(benign_x)
# print(network.get_false_positive_rate())
import functools
import itertools
import operator
from collections import Counter
from typing import List, Tuple, Callable
import matplotlib.pyplot as plt
import numpy as np
import pulp
from os.path import dirname
from data.loader import np_arrays_from_scored_csv
CURRENT_RUN = 1 # To name figures files in each iteration
def solve_with_lp(actions_attacker: List,
                  actions_defender: List,
                  u: Callable,
                  loss: Callable,
                  benign_data_prob: dict,
                  l: List):
    """Solve the attacker/defender zero-sum game as a linear program.

    The defender picks, for every action point, a probability distribution
    over the rate limits in `l`; the attacker picks one action.  The LP
    minimises the game value `v` subject to: per-point distributions summing
    to 1, and for every attacker action the expected utility plus the
    expected false-positive cost staying below `v`.

    :param actions_attacker: attacker's action points (tuples)
    :param actions_defender: defender's action points (tuples)
    :param u: utility function u(attacker_action, limit) -> float
    :param loss: false-positive loss as a function of a limit
    :param benign_data_prob: probability of benign data at each action point
    :param l: list of available rate limits
    :return: (game value, attacker probabilities from constraint duals,
              defender probability variables per point)
    """
    print('Going to solve with LP')
    print(f'Attacker\'s actions by now: {actions_attacker}')
    print(f'Defender\'s actions by now: {actions_defender}')
    print(f'Benign data probabilities are: {benign_data_prob}')

    # Create LP problem minimising the game value "v"
    m = pulp.LpProblem("Zero sum game", pulp.LpMinimize)
    v = pulp.LpVariable("v")
    m += v

    # One probability distribution over the limits `l` per defender point.
    # BUGFIX: this previously iterated the undefined global `defender_actions`
    # instead of the `actions_defender` parameter (which was never used).
    print("Defining defenders probabilities...")
    probs_defender = []
    for action in actions_defender:
        probs_point = [pulp.LpVariable(f'p({action[0]},'
                                       f'{action[1]},'
                                       f'{k})', 0, 1) for k in l]
        m += pulp.lpSum(probs_point) == 1  # Probabilities sum to 1
        probs_defender.append(probs_point)

    # Expected false-positive cost over the benign data distribution.
    # BUGFIX: was zipping the undefined global `defender_actions`.
    fp_cost = 0
    for action, prob in zip(actions_defender, probs_defender):
        for j in range(len(l)):
            fp_cost += loss(l[j]) * benign_data_prob[tuple(action)] * prob[j]

    print("Defining main constraint...")
    # NOTE(review): indexing probs_defender by the attacker-action index
    # assumes both action lists enumerate the same grid in the same order
    # (they do for the 11x11 grids built in this module) — confirm for
    # other callers.
    # BUGFIX: was ranging over the undefined global `attacker_actions`.
    constraints = []
    for i in range(len(actions_attacker)):
        suma = [fp_cost]
        for j in range(len(l)):
            suma.append(probs_defender[i][j] * u(actions_attacker[i], l[j]))
        constraints.append(pulp.lpSum(suma) <= v)
    for c in constraints:
        m += c

    print("Ok, let's solve now...")
    m.solve()
    print(f'LP solved')
    print(f'Value of the game: {v.varValue}')
    print(f'Found solution: {pulp.LpStatus[m.status]}')
    print(f'Attacker\'s probabilities:')
    print(f'{list(str(abs(c.pi)) + " " for c in constraints)}')
    print(f'Deffender\'s probabilities:')
    for probs in probs_defender:
        print(f'{list(str(item.varValue) + " " for item in probs)}')

    # Attacker's mixed strategy is recovered from the constraint duals (c.pi)
    return v.varValue, [abs(c.pi) for c in constraints], [prob for prob in
                                                          probs_defender]
def create_attacker_actions():
    """Return every point of an 11x11 grid over [0, 1]^2 as (x, y) tuples."""
    axis = np.linspace(0, 1, 11)  # [0, 0.1, 0.2, ..., 0.9, 1]
    return [(x, y) for x in axis for y in axis]
def create_defenders_actions():
    """Return every point of an 11x11 grid over [0, 1]^2 as (x, y) tuples."""
    axis = np.linspace(0, 1, 11)  # [0, 0.1, 0.2, ..., 0.9, 1]
    return [(x, y) for x in axis for y in axis]
def utility(attacker_action: Tuple, l):
    """Attacker's payoff: product of the action's coordinates scaled by (1 - l).

    A rate limit of l = 1 (full block) zeroes the payoff; l = 0 leaves it
    untouched.  An empty action yields the neutral product 1.
    """
    product = 1
    for coordinate in attacker_action:
        product *= coordinate
    return product * (1 - l)
def plot_summarization(def_actions, def_probs, l, at_actions, at_probs, value):
    """Plot the solved game: defender strategies, attacker mixed strategy,
    utility contours and a legend, then save the figure as a PNG.

    :param def_actions: defender action points (pairs)
    :param def_probs: per-point lists of pulp variables (one per limit in l)
    :param l: list of available rate limits
    :param at_actions: attacker action points (pairs)
    :param at_probs: attacker's probability for each action
    :param value: final value of the game (shown in the legend)
    """
    fig = plt.gcf()
    fig.set_size_inches(19, 11, forward=True)

    plt.title(f'Optimal solution with rate limiting option in utility '
              f'(not as lp constraint)')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.xlim(-0.1, 1.1)
    plt.ylim(-0.1, 1.1)

    # Plot defender: one dot per action point, annotated with its
    # limit -> probability pairs.
    # NOTE(review): the loop variable shadows the def_probs parameter;
    # works because the zip is built before rebinding, but fragile.
    for point, def_probs in zip(def_actions, def_probs):
        notation = ''
        for i in range(len(l)):
            # No need to show zero probability
            if def_probs[i].varValue == 0:
                continue
            # Colour alone conveys a pure strategy of doing nothing (l=0)
            # or blocking 100% (l=1), so drop the text annotation
            if def_probs[i].varValue == 1 and (l[i] == 1 or l[i] == 0):
                notation = ''
                break
            addition = f'{l[i]}->{round(def_probs[i].varValue, 2)}'
            notation = f'{notation}\n{addition}' if notation else f'{addition}'
        plt.scatter(point[0], point[1], c=[pick_color(def_probs, l)])
        plt.annotate(notation, (point[0], point[1]), weight="bold")

    # Plot attacker: crosses for actions played with non-zero probability
    for point, prob in zip(at_actions, at_probs):
        if prob == 0:
            continue
        plt.scatter(point[0], point[1] - 0.01, c='blue', marker='x')
        plt.annotate(f'{round(prob, 2)}', (point[0], point[1] - 0.03),
                     weight='bold', color='blue')

    # Plot contour lines of the utility product (just for testing)
    for i in range(10, -10, -1):
        x = np.linspace(0, 1, 100)
        plt.plot(x, (i/10)/x, alpha=0.1)

    # Hack the legend: plot invisible/off-screen artists so labels appear
    plt.plot([], [], ' ', label=f'Final value of the game = {round(value, 2)}')
    plt.plot([100], [100], c='blue', label='\"FP\" loss function')
    plt.scatter([100], [100], c='red', label='block 100%')
    plt.scatter([100], [100], c='green', label='Do nothing')
    plt.scatter([100], [100], c='blue', marker='x', label='Attacker actions')
    plt.scatter([100], [100], s=0, label='limit -> prob')
    plt.legend(prop={'size': 12})

    # Show/save: write the figure next to the results of this run
    plt.savefig(f'{dirname(__file__)}/../../results/optimal/'
                f'{CURRENT_RUN}-summarization.png')
    # plt.show()
    plt.close(fig)