Commit 49a8c25a authored by Martin Řepa's avatar Martin Řepa

Add configurable i_a and i_d

parent 4e74b2d9
......@@ -17,4 +17,4 @@ matplotlib = "*"
torch = "*"
[requires]
python_version = "3.6"
python_version = "3.7"
This diff is collapsed.
......@@ -24,6 +24,7 @@ class Attacker:
self.conf = model_conf.attacker_conf
self.features_count = model_conf.features_count
self.utility = model_conf.attacker_utility
self.torch_utility = model_conf.attacker_torch_utility
self.actions: np.array = None
if not self.conf.use_gradient_descent:
......@@ -39,14 +40,6 @@ class Attacker:
return self._gradient_best_response(actions, probs)
else:
return self._discrete_best_response(actions, probs)
# # TMP
# optimal = self._discrete_best_response(def_actions, def_probs)
# gradient_brp = self._gradient_best_response(def_actions, def_probs)
#
# if list(map(lambda a: round(a, 2), gradient_brp)) != optimal:
# print("A je to v píči")
#
# return optimal
def _discrete_best_response(self, def_actions: List, def_probs: List) -> List:
best_rp = max(self.actions, key=lambda a1: sum(map(operator.mul, map(
......@@ -68,13 +61,9 @@ class Attacker:
# logger.debug(f'Epoch {i} in attacker best response searching')
loss = 0
for nn, prob in zip(def_actions, def_probs):
prediction = nn._limit_predict(attacker_action,
with_grad=True)
# Attacker wants to maximize its gain, but optimiser tries
# to minimize. That's why we negate the objective function
loss += -(torch.add(1, -prediction) * prob *
torch.prod(attacker_action))
loss += -(self.torch_utility(attacker_action, nn) * prob)
# Calculate gradient and update the value
optimizer.zero_grad()
......@@ -85,7 +74,7 @@ class Attacker:
attacker_action.data.clamp_(min=0.0, max=1.0)
action = [attacker_action[i].item() for i in range(self.features_count)]
action_gain = - loss.item() # Negate the loss again
action_gain = - loss.item() # Negate the loss again for correct value
return action, action_gain
def _gradient_best_response(self, def_actions: List, def_probs: List) -> List:
......
......@@ -2,7 +2,8 @@ from typing import Callable
import attr
from utility import attacker_rate_limit_utility
from utility import get_attacker_utility, get_attacker_torch_grad_utility, \
get_nn_loss_function
@attr.s
......@@ -13,6 +14,9 @@ class NeuralNetworkConfig:
# Learning rate for Adam optimiser
learning_rate = 0.5e-1
# Loss function used for training
loss_function: Callable = attr.ib(init=False)
@attr.s
class DefenderConfig:
......@@ -58,11 +62,12 @@ class AttackerConfig:
# Attention. Used only when use_gradient_descent is set to True!
epochs = 500
@attr.s
class ModelConfig:
# Name of .csv file in src/data/scored directory with scored data which will
# be used as benign data in neural network training phase
benign_data_file_name: str = attr.ib(default='test.csv') # all_benign_scored.csv
benign_data_file_name: str = attr.ib(default='test.csv')
# Number of benign records to be loaded
benign_data_count: int = attr.ib(default=1000)
......@@ -76,9 +81,25 @@ class ModelConfig:
# Defender
defender_conf: DefenderConfig = attr.ib(default=DefenderConfig())
# i_a
i_a: int = attr.ib(default=1)
# i_d
i_d: int = attr.ib(default=4)
# Function to calculate utility for attacker given the actions
# f: List[float], NeuralNetwork -> float
attacker_utility: Callable = attr.ib(default=attacker_rate_limit_utility)
attacker_utility: Callable = attr.ib(init=False)
# Attacker utility function using torch tensors with gradient property
# Used for attacker to find best response via gradient descent
attacker_torch_utility: Callable = attr.ib(init=False)
def __attrs_post_init__(self):
    # Runs after attrs has set all declared fields: derive the callables
    # that depend on the configurable i_a and i_d exponents.
    nn_conf = self.defender_conf.nn_conf
    nn_conf.loss_function = get_nn_loss_function(self.i_a, self.i_d)
    self.attacker_utility = get_attacker_utility(self.i_a)
    self.attacker_torch_utility = get_attacker_torch_grad_utility(self.i_a)
@attr.s
......@@ -95,4 +116,4 @@ class RootConfig:
if __name__ == "__main__":
pass
a = RootConfig()
import logging
from pathlib import Path
from typing import Callable
import attr
import numpy as np
......@@ -60,15 +61,6 @@ class NeuralNetwork:
self.attacker_actions = attack
self.benign_data = benign_data
def loss_function(self, x, limits, real_y, probs):
    # Defender training loss: a zero-sum term over attack samples plus a
    # false-positive cost over benign samples.
    # x: batched feature vectors; limits: predicted rate limits per sample;
    # real_y: labels (1 = attack, 0 = benign — see malicious_y = np.ones in
    # the training script); probs: per-sample probability weights.
    zero_sum_part = torch.sum(real_y*(1-limits)*torch.prod(x, dim=1)*probs)
    fp_cost = self._fp_cost_tensor(limits, real_y, probs)
    sum_loss = torch.add(zero_sum_part, fp_cost)
    return sum_loss
def _fp_cost_tensor(self, limits, real_y, probs):
    # False-positive cost: penalises non-zero limits on benign samples
    # (real_y == 0), growing with the 4th power of the predicted limit.
    return torch.sum((1-real_y) * probs * torch.pow(limits, 4))
def _prepare_data(self):
defender = self.benign_data
attacker = self.attacker_actions
......@@ -100,15 +92,14 @@ class NeuralNetwork:
optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
for e in range(self.conf.epochs):
# Forward pass: compute predicted y by passing x to the model.
train_limits = self._limit_predict(self.x_train, with_grad=True)
# Forward pass: compute predicted y by passing x to the model
train_ltncies = self._latency_predict(self.x_train, with_grad=True)
# Compute loss.
loss = self.loss_function(self.x_train, train_limits, self.y_train,
self.probs_train)
# loss = self.loss_fn(train_limits, self.y_train)
# Compute loss
loss, _ = self.conf.loss_function(self.x_train, train_ltncies,
self.y_train, self.probs_train)
# Compute validation loss and report some info
# Log loss function value each 5 epochs
if e % 5 == 0:
logging.debug(f'Epoch: {e}/{self.conf.epochs},\t'
f'TrainLoss: {loss},\t')
......@@ -125,15 +116,18 @@ class NeuralNetwork:
# parameters
optimizer.step()
self.final_fp_cost = self._fp_cost_tensor(train_limits, self.y_train,
self.probs_train).item()
self.final_loss = loss
with torch.no_grad():
loss, fp_part = self.loss_function(self.x_train, train_ltncies,
self.y_train, self.probs_train)
# measuring quality of final network
self.final_loss = loss.item()
self.final_fp_cost = fp_part.item()
def _raw_predict(self, tensor: torch.Tensor):
    """Feed *tensor* through the underlying model and return the raw
    predictions as a flat float tensor."""
    raw = self.model(tensor)
    return raw.flatten().float()
def _limit_predict(self, x: torch.Tensor, with_grad=False):
def _latency_predict(self, x: torch.Tensor, with_grad=False):
if with_grad:
raw_prediction = self._raw_predict(x)
else:
......@@ -143,30 +137,30 @@ class NeuralNetwork:
# The same as lambda p: 0 if p < 0.5 else (p - 0.5) * 2
# TODO try to use e.g. sigmoid
clamped = raw_prediction.clamp(min=0.5, max=1)
limit = torch.mul(torch.add(clamped, -0.5), 2)
return limit
latency = torch.mul(torch.add(clamped, -0.5), 2)
return latency
def predict_single_latency(self, input, return_tensor=False):
    """Predict the latency for a single feature vector.

    :param input: feature vector as list, tuple, numpy array or torch tensor
    :param return_tensor: when True return a torch tensor (keeps the grad
                          machinery); otherwise a plain Python float
    """
    # Normalise array-like inputs to a float tensor; torch tensors pass
    # through unchanged.  NOTE(review): `isinstance` replaces the original
    # `type(input) == ...` chain — the old `in_type == np.array` test was
    # dead code, since a type object never equals the np.array function.
    if isinstance(input, (list, tuple, np.ndarray)):
        input = torch.tensor(input).float()
    prediction = self._latency_predict(input)[0]
    if return_tensor:
        return prediction
    return prediction.item()
def setup_loger(debug: bool):
    """Configure the root logger with a tab-separated verbose format.

    NOTE: name kept as ``setup_loger`` (sic) — callers depend on it.

    :param debug: when True log at DEBUG level, otherwise INFO
    """
    log_format = ('%(asctime)-15s\t%(name)s:%(levelname)s\t'
                  '%(module)s:%(funcName)s:%(lineno)s\t%(message)s')
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=level, format=log_format)
if __name__ == '__main__':
setup_loger(RootConfig())
setup_loger(True)
benign_x, _ = np_arrays_from_scored_csv(
Path('all_benign_scored.csv'), 0, 500)
malicious_x, _ = np_arrays_from_scored_csv(
......@@ -182,7 +176,8 @@ if __name__ == '__main__':
malicious_y = np.ones(len(malicious_unique_x))
malicious_data = FormattedData(malicious_unique_x, probs_malicious, malicious_y)
nn = NeuralNetwork()
conf = RootConfig()
nn = NeuralNetwork(conf.model_conf.nn_loss_function)
nn.set_data(benign_data, malicious_data)
nn.train()
import functools
import operator
from typing import List
from typing import TYPE_CHECKING
import numpy as np
# Hack to avoid cycle imports while using type checking
# The TYPE_CHECKING constant is always False at runtime
import torch
if TYPE_CHECKING:
from src.neural_networks.network import NeuralNetwork
def attacker_rate_limit_utility(attacker_features: List[float], defender_network: 'NeuralNetwork'):
    """Attacker gain against one defender: the product of the attack's
    feature values scaled by the fraction of traffic that gets through."""
    limit = defender_network.predict_single_limit(attacker_features)
    volume = 1
    for feature in attacker_features:
        volume *= feature
    return volume * (1 - limit)
def get_attacker_utility(i_a: int):
    """Build the attacker utility function for a given ``i_a`` exponent.

    The returned callable scores an attacker action against one defender
    network as ``prod(features) * (1 - latency) ** i_a``.

    :param i_a: exponent applied to the (1 - latency) term
    """
    def attacker_utility(attacker_features: List[float], nn: 'NeuralNetwork'):
        pred = nn.predict_single_latency(attacker_features)
        # np.prod, not np.product: the latter is a deprecated alias that
        # was removed in NumPy 2.0.
        return np.prod(attacker_features) * (1 - pred) ** i_a
    return attacker_utility
def get_attacker_torch_grad_utility(i_a: int):
    """Build the torch version of the attacker utility for exponent ``i_a``.

    The returned callable keeps the gradient tape intact so the attacker
    can optimise its action with gradient descent.
    """
    def attacker_utility(attacker_features: torch.tensor, nn: 'NeuralNetwork'):
        latency = nn._latency_predict(attacker_features, with_grad=True)
        damping = (1 - latency) ** i_a
        volume = torch.prod(attacker_features)
        return damping * volume
    return attacker_utility
def get_nn_loss_function(i_a: int, i_d: int):
    """Build the defender network loss for exponents ``i_a`` and ``i_d``.

    The returned callable yields ``(total_loss, fp_cost)`` where the total
    is a zero-sum term over attack samples plus a false-positive cost over
    benign samples.
    """
    def loss_function(x, latencies, real_y, probs):
        volume = torch.prod(x, dim=1)
        damping = (1 - latencies) ** i_a
        zero_sum_part = torch.sum(real_y * damping * volume * probs)
        fp_cost = torch.sum((1 - real_y) * probs * latencies ** i_d)
        return zero_sum_part + fp_cost, fp_cost
    return loss_function
......@@ -43,7 +43,7 @@ class Plotter:
for nn, prob in self.defenders:
if prob == 0:
continue
pred = nn.predict_single_limit(point)
pred = nn.predict_single_latency(point)
if pred:
sum_prob += prob
......@@ -63,7 +63,7 @@ class Plotter:
plt.xlabel('entropy')
plt.ylabel('length')
for point in points:
pred = neural_network.predict_single_limit(point)
pred = neural_network.predict_single_latency(point)
red = pred
green = 1-pred
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.