Commit c5e03e51 authored by Martin Řepa's avatar Martin Řepa

Created the blocker model and did some research on the optimality of my NN

parent 2c39d5b3
......@@ -24,7 +24,8 @@ class Attacker:
return [np.random.uniform(0.0, 1.0) for _ in range(self.features_count)]
def get_initial_action(self) -> List:
return self.random_action()
return [0.59897846, 0.2900984]
# return self.random_action()
def get_best_response(self, def_actions: List, def_probs: List):
# Take only the defender's actions that are played with non-zero probability
......@@ -64,10 +65,14 @@ class DiscreteAttacker(Attacker):
def _get_best_response(self, def_actions: List, def_probs: List) -> List:
best_rp = max(self.actions, key=lambda a1: sum(map(operator.mul, map(
lambda a2: self.utility(a1, a2), def_actions), def_probs)))
# Store the expected utility of the best response
self.value_of_last_brp = sum(map(operator.mul, map(lambda nn:
self.utility(best_rp, nn), def_actions), def_probs))
return list(best_rp)
def _does_br_exists(self, played_actions_p1, br_p1, value):
return br_p1 in played_actions_p1
return self.value_of_last_brp - value < self.conf.epsion
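For context, a minimal sketch (outside the diff) of the expected-utility best response that _get_best_response computes above; utility, actions, def_actions and def_probs are toy stand-ins for the repo's attributes:

def best_response(actions, def_actions, def_probs, utility):
    # Expected utility of attacker action a1 against the defender's mixture
    def expected_utility(a1):
        return sum(utility(a1, a2) * p for a2, p in zip(def_actions, def_probs))
    best = max(actions, key=expected_utility)
    # The second return value plays the role of value_of_last_brp above
    return list(best), expected_utility(best)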
class GradientAttacker(Attacker):
......@@ -82,7 +87,7 @@ class GradientAttacker(Attacker):
all_actions = torch.tensor(all_actions, requires_grad=True)
optimizer = torch.optim.Adam([all_actions], lr=self.conf.learning_rate)
for i in range(self.conf.epochs):
for _ in range(self.conf.epochs):
losses = 0
for nn, prob in zip(def_actions, def_probs):
losses += -(self.torch_utility(all_actions, nn) * prob)
......
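A rough self-contained sketch of the gradient-based best-response search in the GradientAttacker hunk above; the diff elides the backward/step part, and the final clamp to the unit square is my assumption:

import torch

def gradient_best_response(initial_points, def_actions, def_probs,
                           torch_utility, epochs=2000, lr=0.5e-4):
    points = torch.tensor(initial_points, requires_grad=True)
    optimizer = torch.optim.Adam([points], lr=lr)
    for _ in range(epochs):
        loss = torch.zeros(())
        for nn, prob in zip(def_actions, def_probs):
            # Minimising the negative expected utility maximises the utility
            loss = loss - torch_utility(points, nn) * prob
        optimizer.zero_grad()
        loss.sum().backward()
        optimizer.step()
        with torch.no_grad():
            points.clamp_(0.0, 1.0)  # assumption: features live in [0, 1]
    return points.detach()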
......@@ -10,9 +10,11 @@ logger = logging.getLogger(__name__)
def prepare_benign_data(raw_x_data) -> FormattedData:
logger.debug('Let\'s prepare benign data. Taking only unique records.')
unique, counts = np.unique(raw_x_data, axis=0, return_counts=True)
probs = np.array([count / len(raw_x_data) for count in counts])
benign_y = np.zeros(len(unique))
logger.debug('Data preparation done.')
return FormattedData(unique, probs, benign_y)
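The function builds the empirical distribution over the unique benign records; a tiny worked example with toy data:

import numpy as np

raw_x_data = np.array([[0.1, 0.2], [0.1, 0.2], [0.3, 0.4]])
unique, counts = np.unique(raw_x_data, axis=0, return_counts=True)
probs = counts / len(raw_x_data)
# unique -> [[0.1, 0.2], [0.3, 0.4]], probs -> [2/3, 1/3]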
......@@ -42,15 +44,31 @@ class Defender:
best_nn = self._train_nn(attack)
for _ in range(1, self.conf.number_of_nn_to_train):
new_nn = self._train_nn(attack)
self._log_creation(new_nn, best_nn)
if new_nn.final_loss < best_nn.final_loss:
logger.debug(f'Found better nn. Old|New value: '
f'{best_nn.final_loss} | {new_nn.final_loss}')
best_nn = new_nn
else:
logger.debug(f'The previous nn was better, dif: '
f'{new_nn.final_loss - best_nn.final_loss}')
# If my response is to block nothing, the training might have got stuck, so
# I try up to 10 more NNs  # TODO check that this does not get stuck
attacker_goal = np.sum(np.prod(attack.unique_x, axis=1)*attack.probs_x)
tries = 0
while abs(best_nn.final_loss - attacker_goal) < 1e-5 and tries < 10:
tries += 1
new_nn = self._train_nn(attack)
self._log_creation(new_nn, best_nn)
if new_nn.final_loss < best_nn.final_loss:
best_nn = new_nn
return best_nn
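The comment and retry loop above suggest that a network which blocks nothing ends with final_loss equal to the attacker's unblocked payoff; a worked example of the attacker_goal expression with toy numbers:

import numpy as np

unique_x = np.array([[0.8, 0.5], [0.2, 0.9]])  # toy attacker actions
probs_x = np.array([0.75, 0.25])
attacker_goal = np.sum(np.prod(unique_x, axis=1) * probs_x)
# = 0.8*0.5*0.75 + 0.2*0.9*0.25 = 0.300 + 0.045 = 0.345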
def _log_creation(self, new_nn, best_nn):
if new_nn.final_loss < best_nn.final_loss:
logger.debug(f'Found better nn. Old|New value: '
f'{best_nn.final_loss} | {new_nn.final_loss}')
else:
logger.debug(f'The previous nn was better, dif: '
f'{new_nn.final_loss - best_nn.final_loss}')
def get_initial_action(self) -> NeuralNetwork:
non_attack = self._get_empty_attack()
return self._train_nn(non_attack)
......
......@@ -2,17 +2,16 @@ from typing import Callable
import attr
from utility import get_attacker_utility, get_attacker_torch_grad_utility, \
get_nn_loss_function
from utility import *
@attr.s
class NeuralNetworkConfig:
# Number of epochs in a neural network training phase
epochs: int = attr.ib(default=600)
epochs: int = attr.ib(default=5000)
# Learning rate for Adam optimiser
learning_rate = 0.5e-1
learning_rate = 0.1e-2
# Loss function used for training
loss_function: Callable = attr.ib(init=False)
......@@ -20,13 +19,14 @@ class NeuralNetworkConfig:
@attr.s
class DefenderConfig:
# 2 neural networks are considered the same if difference of game value for
# them and each attacker's action is less than epsion
# A new actor's action is considered already played in the game if the
# difference between the utility of the new action and the Nash equilibrium
# value of the zero-sum game is less than this value
defender_epsilon: float = attr.ib(default=1e-3)
# This number of neural networks will be trained in each double oracle
# iteration and the best one will be considered as a best response
number_of_nn_to_train: int = attr.ib(default=7)
number_of_nn_to_train: int = attr.ib(default=1)
# conf of neural networks
nn_conf: NeuralNetworkConfig = attr.ib(default=NeuralNetworkConfig())
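An illustrative sketch (not the repo's exact code) of the termination test that defender_epsilon appears to drive, following the comment above:

def br_already_played(br_utility, zero_sum_nash_val, epsilon=1e-3):
    # A best response that cannot improve on the current equilibrium value by
    # more than epsilon adds nothing new, so the double oracle can stop
    return abs(br_utility - zero_sum_nash_val) < epsilon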
......@@ -41,10 +41,9 @@ class AttackerConfig:
# [(.0,.01),(.0,.02),...,(.1,.1)]
use_gradient_descent: bool = attr.ib(default=True)
# 2 attacker actions are considered the same if difference of absolute value
# of attacker's utility function for them and all defender's actions is less
# than this value
# Attention. Used only when use_gradient_descent is set to True!
# A new actor's action is considered already played in the game if the
# difference between the utility of the new action and the Nash equilibrium
# value of the zero-sum game is less than this value
epsion: float = attr.ib(default=1e-3)
# Number of random tries to find attacker action using gradient descent.
......@@ -55,16 +54,19 @@ class AttackerConfig:
# Learning rate for optimiser which updates attacker action while searching
# for best response using gradient descent
# Attention. Used only when use_gradient_descent is set to True!
learning_rate = 0.5e-2
learning_rate = 0.5e-4
# Number of iterations used to update gradient descent while searching for
# best response
# Attention. Used only when use_gradient_descent is set to True!
epochs = 200
epochs = 2000
@attr.s
class ModelConfig:
# Whether to use the blocking model instead of the latency model
use_blocking_model: bool = attr.ib(default=False)
# Name of the .csv file in the src/data/scored directory with scored data that
# will be used as benign data in the neural network training phase
benign_data_file_name: str = attr.ib(default='test.csv')
......@@ -99,11 +101,23 @@ class ModelConfig:
attacker_torch_utility: Callable = attr.ib(init=False)
def __attrs_post_init__(self):
self.attacker_utility = get_attacker_utility(self.i_a)
self.attacker_torch_utility = get_attacker_torch_grad_utility(self.i_a)
self.defender_conf.nn_conf.loss_function = get_nn_loss_function(
self.i_a, self.i_d,
self.benign_ratio)
if self.use_blocking_model:
self.attacker_utility = get_blocking_attacker_utility()
self.attacker_torch_utility = get_blocking_torch_grad_utility()
self.defender_conf.nn_conf.loss_function = \
get_blocking_nn_loss_function(self.benign_ratio)
else:
self.attacker_utility = get_latency_attacker_utility(self.i_a)
self.attacker_torch_utility = get_latency_torch_grad_utility(self.i_a)
self.defender_conf.nn_conf.loss_function = \
get_latency_nn_loss_function(self.i_a, self.i_d, self.benign_ratio)
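The blocking-model utilities themselves are not in this hunk. Purely as an illustration of the shape such a utility might take (the predict_single call and the payoff form are my assumptions, not the repo's API):

import numpy as np

def get_blocking_attacker_utility_sketch():
    # Hypothetical: the attacker collects prod(features) only for traffic the
    # defender's network does not block
    def utility(attacker_action, defender_nn):
        block_prob = defender_nn.predict_single(attacker_action)  # assumed API
        return np.prod(attacker_action) * (1.0 - block_prob)
    return utility

This shape would at least be consistent with the attacker_goal expression in the defender hunk, which reduces to prod(features) weighted by the empirical probabilities when nothing is blocked.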
@attr.s
class PlotterConfig:
# Determines whether plotting is enabled
plot_enabled: bool = attr.ib(default=True)
@attr.s
......@@ -111,13 +125,13 @@ class RootConfig:
# Sets logger to debug level
debug: bool = attr.ib(default=True)
# Determine whether to plot final results
# Do not use if features_count > 2!
plot_result: bool = attr.ib(default=True)
# Configuration of model used
model_conf: ModelConfig = attr.ib(default=ModelConfig())
# Configuration for plotting the results
# Unused when features_count != 2
plot_conf: PlotterConfig = attr.ib(default=PlotterConfig())
if __name__ == "__main__":
a = RootConfig()
from typing import List, Callable
from math import log
import attr
import pandas as pd
@attr.s
class Feature:
name = attr.ib(factory=str)
func = attr.ib(factory=Callable) # Function f: str -> int
func = attr.ib(factory=Callable) # Function f: str -> float
# Count occurrences of unusual letters in a string
def uncommon_letters_score(word: str) -> int:
unusual = ('q', 'x', 'z', 'f')
score = 0
for letter in word.lower():
if letter in unusual:
score += 1
return score
def remove_tld_from_domain(domain: str):
return domain.rsplit('.', 1)[0]
# Calculate domain normalised uncommon_letters_score
def normalised_letters_score(word: str) -> float:
return uncommon_letters_score(word) / 255
class BigramCalculator:
"""
Bigram frequencies are calculated offline from 1,000,000 domains downloaded
from the Majestic site:
majestic.com/reports/majestic-million
"""
def __init__(self, file_name: str = 'bigrams_frequency3.csv'):
dic = pd.read_csv(file_name).set_index('bigram').to_dict()
self.bigram_dict = dic['frequency']
# The most frequent bigram is 'in', with 154737 occurrences.
# It is therefore also the highest score a domain can get.
self.max_frequency = 154737
def bigram(self, domain: str):
domain = remove_tld_from_domain(domain)
bigrams_count = 0
freq_sum = 0
for subdomain in domain.split("."):
for i in range(len(subdomain[:-1])):
current = subdomain[i] + subdomain[i+1]
freq_sum += self.bigram_dict.get(current, 0)
bigrams_count += 1
# Subtract from the maximum so that domains closest to natural language
# tend to zero and synthetic strings tend to one (after normalisation)
return self.max_frequency - (freq_sum / bigrams_count)
def normalised_bigram(self, domain: str):
return self.bigram(domain) / self.max_frequency
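Illustrative usage of the calculator (exact scores depend on bigrams_frequency3.csv):

calc = BigramCalculator()               # loads bigrams_frequency3.csv
calc.normalised_bigram('google.com')    # natural-language bigrams: score near 0
calc.normalised_bigram('xq7zk2v9.com')  # synthetic-looking string: score near 1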
# Calculate entropy given the string
def entropy(word: str) -> float:
# Calculate entropy given the domain
def entropy(domain: str) -> float:
word = remove_tld_from_domain(domain)
e = 0.0
length = len(word)
occurrence = {}
......@@ -37,10 +61,10 @@ def entropy(word: str) -> float:
return e
# Calculate normalised domain entropy. Max possible entropy is 5.2 (longest
# domain can have up to 255 chars)
# There are 63 symbols from the English alphabet allowed in a domain, so the
# maximum entropy is log_2(63) = 5.977
def norm_entropy(word: str) -> float:
return entropy(word) / 5.2
return entropy(word) / 5.977
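A self-contained version of the character-entropy computation (the hunk above elides the loop body), with two sanity checks against the log_2(63) bound:

from collections import Counter
from math import log

def char_entropy(word: str) -> float:
    length = len(word)
    counts = Counter(word)
    # Shannon entropy over character frequencies, in bits
    return -sum((c / length) * log(c / length, 2) for c in counts.values())

char_entropy('aaaa')  # 0.0: a single repeated symbol carries no information
char_entropy('abcd')  # 2.0: four equally likely symbols need 2 bits each
# With 63 distinct allowed symbols the maximum is log_2(63) ~= 5.977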
# Calculate normalised domain length
......@@ -48,6 +72,22 @@ def norm_len(word: str) -> float:
return len(word) / 255
# Count occurrences of unusual letters in a string
def uncommon_letters_score(domain: str) -> int:
word = remove_tld_from_domain(domain)
unusual = ('q', 'x', 'z', 'f')
score = 0
for letter in word.lower():
if letter in unusual:
score += 1
return score
# Calculate domain normalised uncommon_letters_score
def normalised_letters_score(word: str) -> float:
return uncommon_letters_score(word) / 255
# Create the header line of the .csv file with features
def initial_line(features: List[Feature], debug: bool = False) -> str:
line = 'query' if debug else ''
......
This diff is collapsed.
This diff is collapsed.
#normalised entropy normalised length
0.7644570725417812 0.14901960784313725
0.6487610119759972 0.058823529411764705
0.6228656262697367 0.050980392156862744
0.6734524541770424 0.12156862745098039
0.5769230769230769 0.03137254901960784
0.6844383789661873 0.09803921568627451
0.1923076923076923 0.00784313725490196
0.728527180703221 0.11372549019607843
0.523447710555262 0.0392156862745098
0.6880422175220736 0.06274509803921569
0.5170618991424745 0.047058823529411764
0.7278734283533927 0.08235294117647059
0.5740823338527407 0.054901960784313725
0.43269230769230765 0.03137254901960784
0.6744020376170228 0.058823529411764705
0.7474696515636562 0.09411764705882353
0.7253952324758078 0.09411764705882353
0.6524514250863047 0.050980392156862744
0.746547136180166 0.09019607843137255
0.5821128525690937 0.050980392156862744
0.5485032695120159 0.06274509803921569
0.7120137523499513 0.11372549019607843
0.6629350960463866 0.09019607843137255
0.8597255953484009 0.17254901960784313
0.6820075662828875 0.0784313725490196
0.652642486838989 0.10196078431372549
0.7095584100383743 0.08235294117647059
0.5844516676736675 0.054901960784313725
0.6656384885021169 0.07058823529411765
0.6645481116499121 0.10196078431372549
0.6793410110581961 0.10196078431372549
0.6885504230244426 0.10196078431372549
0.6589633544719672 0.11372549019607843
0.6504938621684762 0.10196078431372549
0.1923076923076923 0.00784313725490196
0.710889422915196 0.09019607843137255
0.6796575040731424 0.09019607843137255
0.687811085559359 0.07450980392156863
0.6729283744485949 0.06666666666666667
0.6681740361741211 0.10196078431372549
0.5668659190807865 0.03529411764705882
0.6656384885021169 0.07058823529411765
0.7181362218410107 0.10196078431372549
0.665638488502117 0.07058823529411765
0.684330502509405 0.08235294117647059
0.664723992717491 0.058823529411764705
0.7705863130359535 0.12549019607843137
0.611924195146195 0.054901960784313725
0.6936416792644848 0.08627450980392157
0.6793410110581961 0.10196078431372549
This diff is collapsed.
#normalised entropy normalised length
0.9661064770376071 0.996078431372549
0.9610788560382011 0.996078431372549
0.9579793120834494 0.7372549019607844
0.9638118622668527 0.9921568627450981
0.9616181747939105 0.9921568627450981
0.970843382770317 0.9921568627450981
0.9636374461384305 0.9921568627450981
0.7645715212200813 0.2
0.9629108881373653 0.996078431372549
0.9624670315806567 0.9921568627450981
0.9612622516402483 0.9921568627450981
0.969517759610237 0.9921568627450981
0.9732506972020125 0.9921568627450981
0.970654588741244 0.9921568627450981
0.9602540346154786 0.9921568627450981
0.9557187990195201 0.996078431372549
0.9591474425451948 0.996078431372549
0.9763388643138835 0.9921568627450981
0.9714102661565281 0.996078431372549
0.8678333801854506 0.21568627450980393
0.9720180205687999 0.9921568627450981
0.9660264424349851 0.9921568627450981
0.958100902724699 0.996078431372549
0.9633172650419695 0.996078431372549
0.97101966840149 0.996078431372549
0.958566746473564 0.996078431372549
0.9665604824002637 0.996078431372549
0.9646730848103486 0.996078431372549
0.9592227384561766 0.996078431372549
0.9482644048575208 0.5176470588235295
0.9644728182269342 0.996078431372549
0.9671892948089387 0.9921568627450981
0.972833060048564 0.996078431372549
0.9671906494517046 0.996078431372549
0.9625013167117836 0.9921568627450981
0.9603983498050831 0.9921568627450981
0.9045124885619298 0.21568627450980393
0.9657052169917598 0.996078431372549
0.9681739135520024 0.996078431372549
0.9726964093999644 0.9921568627450981
0.9755889511207536 0.9921568627450981
0.960900346780429 0.996078431372549
0.9644852644107168 0.9921568627450981
0.9640389783314874 0.9921568627450981
0.9533661640425967 0.4196078431372549
0.9686229747256779 0.9921568627450981
0.9669524163592599 0.9921568627450981
0.9433884160478205 0.4196078431372549
0.9714749468195338 0.996078431372549
0.9583216630448729 0.996078431372549
import random
from os.path import dirname
from pathlib import Path
from typing import List
from pandas import read_csv
import pandas as pd
from src.data.features import Feature, initial_line, score_query, norm_entropy, norm_len
from src.data.features import Feature, initial_line, score_query, norm_entropy, \
norm_len, BigramCalculator
def score_benign_dns_log(features: List[Feature], debug=False) -> List[str]:
......@@ -23,6 +26,28 @@ def score_benign_dns_log(features: List[Feature], debug=False) -> List[str]:
return result
def cache_features_wrapper(features, x, cache):
domain = x[0]
if domain in cache:
return cache[domain]
res = [feature.func(domain) for feature in features]
cache[domain] = res
return res
def score_single_queries(file_name: str, features: List[Feature]) -> pd.DataFrame:
file = Path(dirname(__file__)) / Path('raw') / Path(file_name)
df = pd.read_csv(file, header=None, sep=" ", engine="python")
cache = {}
columns = list(map(lambda x: x.name, features))
apply_lambda = lambda x: pd.Series(cache_features_wrapper(features, x, cache),
index=columns)
return df.apply(apply_lambda, axis=1)
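Usage sketch for the cache: the wrapper computes each domain's features once and reuses them for repeated rows (assuming the Feature list defined in __main__ below):

cache = {}
row = ['example.com']                         # x[0] holds the domain
cache_features_wrapper(features, row, cache)  # computed and stored
cache_features_wrapper(features, row, cache)  # cache hit, nothing recomputed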
def score_csv_dns_log(features: List[Feature], debug=False) -> List[str]:
content = read_csv('raw/b32_M250.csv')
result = [initial_line(features, debug)]
......@@ -39,20 +64,19 @@ def score_csv_dns_log(features: List[Feature], debug=False) -> List[str]:
return result
def write_scored(path: str, result: List[str]):
with open(path, 'w', encoding='utf-8') as file:
for item in result:
file.write(f'{item}\n')
def write_scored(file_name: str, result: str):
with open(f'scored/{file_name}', 'w', encoding='utf-8') as file:
file.write(result)
if __name__ == "__main__":
bigram_calculator = BigramCalculator()
features = [
Feature('normalised entropy', norm_entropy),
Feature('normalised length', norm_len)
]
res = score_benign_dns_log(features)
write_scored('scored/all_benign_scored.csv', res)
res = score_csv_dns_log(features)
write_scored('scored/scored_malicious.csv', res)
df = score_single_queries('ctu_1mil', features)
print("Scored. Now write the file")
write_scored('scored_ctu_1mil_2features.csv', df.to_csv())
......@@ -2,7 +2,6 @@ import logging
from src.config import RootConfig
from src.game_solver import GameSolver, Result
from src.visual.plotter import Plotter
logger = logging.getLogger(__name__)
......@@ -26,14 +25,13 @@ class Game:
self.result = gs.double_oracle()
self._write_summary()
# self._plot_result()
def _write_summary(self):
print('\n\n-------------------------------------------------')
logger.info(f'Game has ended with these values\n'
f'transformed zero sum game value: {self.result.zero_sum_nash_val}'
f'attacker value of original game: {self.result.attacker_value}'
f'defender value of original game: {self.result.defender_value}')
logger.info(f'Game has ended with these values'
f'\ttransformed zero sum game value: {self.result.zero_sum_nash_val}'
f'\tattacker value of original game: {self.result.attacker_value}'
f'\tdefender value of original game: {self.result.defender_value}')
logger.info('Attacker: action x probability')
for a, p in zip(self.result.ordered_actions_p1, self.result.probs_p1):
......@@ -45,13 +43,6 @@ class Game:
logger.info(f'{nn} x {p}')
print('-------------------------------------------------')
def _plot_result(self):
if not self._conf.plot_result:
return
logger.debug("Plotting result...")
p = Plotter(self.result)
p.plot_result()
if __name__ == "__main__":
Game().solve_game()
......@@ -3,15 +3,13 @@ from itertools import count
from typing import List
import attr
import matplotlib.pyplot as plt
import numpy as np
import pulp
import torch
from actors.attacker import DiscreteAttacker, GradientAttacker
from actors.defender import Defender
from config import RootConfig
from src.neural_networks.network import NeuralNetwork
from visual.plotter import BlockingPlotter, LatencyPlotter, FakePlotter
logger = logging.getLogger(__name__)
......@@ -39,7 +37,6 @@ def get_fp_cost(probs2, played2):
class GameSolver:
def __init__(self, conf: RootConfig):
self.conf = conf.model_conf
self.plot = conf.plot_result
# Define game actors
if conf.model_conf.attacker_conf.use_gradient_descent:
......@@ -48,89 +45,15 @@ class GameSolver:
self.attacker = DiscreteAttacker(conf.model_conf)
self.defender = Defender(conf.model_conf)
# Variables for plotting progress
if conf.plot_result:
self._init_plots()
def _init_plots(self):
plt.ion()
self.fig, self.ax = plt.subplots(2, 3)
self.actions = torch.tensor(self.attacker.create_discrete_actions()).float()
self.plotted = []
self.ax[0][0].set_title('Defender nash strategy')
self.ax[0][1].set_title('Attacker nash strategy')
self.ax[0][1].set_xlim([0, 1])
self.ax[0][1].set_ylim([0, 1])
self.ax[0][2].set_title('All attackers actions played')
self.ax[0][2].set_xlim([0, 1])
self.ax[0][2].set_ylim([0, 1])
self.ax[1][0].set_title('Defender best response')
self.ax[1][1].set_title('Attacker best response')
self.ax[1][1].set_ylim([0, 1])
self.ax[1][1].set_ylim([0, 1])
self.ax[1][2].set_title('Nothing for now')
plt.tight_layout()
def plot_paths(self):
for iteration, points in self.val_paths:
plt.title("All iterations so far")
plt.scatter([iteration], points[:1], c='blue', s=20) # nash
plt.scatter([iteration], points[1:2], c='red', s=10) # attacker brp
plt.scatter([iteration], points[2:3], c='green', s=10) # defender brp
plt.show()
for iteration, points in self.val_paths[len(self.val_paths)-5:]:
plt.title("Last 5 iterations")
plt.scatter([iteration], points[:1], c='blue', s=20) # nash
plt.scatter([iteration], points[1:2], c='red', s=10) # attacker brp
plt.scatter([iteration], points[2:3], c='green', s=10) # defender brp
plt.show()
def plot_iteration(self, iteration, zero_sum_val, played_p2, probs_p2,
played_p1, probs_p1, br_p1, br_p2):
# Remove all lines from previous iteration plotting
for item in self.plotted:
item.remove()
self.plotted = []
# Set title of current figure
self.fig.suptitle(f'Iteration: {iteration}, value: {zero_sum_val}')
# Plot heat-map of defender's nash strategy actions
res = np.zeros((101, 101))
for nn, prob in zip(played_p2, probs_p2):
if prob == 0: continue
predictions = nn.latency_predict(self.actions).numpy()
res += (predictions * prob).reshape((101, 101))
self.plotted.append(self.ax[0][0].imshow(res, cmap='Reds', vmin=0,
vmax=1, origin='lower', interpolation='spline16'))
# Plot heat-map of defender's best response
res = br_p2.latency_predict(self.actions).numpy().reshape((101, 101))
self.plotted.append(self.ax[1][0].imshow(res, cmap='Reds', vmin=0,
vmax=1, origin='lower', interpolation='spline16'))
# Plot attacker nash strategy
for point, prob in zip(played_p1, probs_p1):
if prob == 0:
continue
self.plotted.append(self.ax[0][1].scatter(point[0], point[1], c='red', marker='^'))
self.plotted.append(self.ax[0][1].annotate(f'{round(prob, 2)}', (point[0], point[1])))
# Plot attacker best response
self.plotted.append(self.ax[1][1].scatter(br_p1[0], br_p1[1], c='red'))
# Add attacker new action to subplot with all his actions
self.ax[0][2].scatter(br_p1[0], br_p1[1], c='blue', marker='^')
# Show the result
self.fig.canvas.draw()
plt.pause(0.000001)
# Set up the proper plotter for plotting the results
if conf.model_conf.features_count != 2:
self.plotter = FakePlotter()
else:
discr_actions = self.attacker.create_discrete_actions()
if conf.model_conf.use_blocking_model:
self.plotter = BlockingPlotter(conf.plot_conf, discr_actions)
else:
self.plotter = LatencyPlotter(conf.plot_conf, discr_actions)
def double_oracle(self) -> Result:
# Get initial actions as the first ones
......@@ -152,10 +75,13 @@ class GameSolver:
attacker_value = zero_sum_nash_val - fp_cost
defender_value = -zero_sum_nash_val
# Plot progress
if self.plot:
self.plot_iteration(i, zero_sum_nash_val, played_actions_p2, probs_p2,
played_actions_p1, probs_p1, br_p1, br_p2)
# Plot progress using plotter
self.plotter.plot_iteration(i, zero_sum_nash_val,
self.attacker.value_of_last_brp + fp_cost,
br_p2.final_loss,
played_actions_p2, probs_p2,
played_actions_p1, probs_p1,
br_p1, br_p2)
# Are those new actions good enough?
br_p1_exists = self.attacker.does_br_exists(played_actions_p1,
......
import itertools
import logging
import time
from pathlib import Path
from typing import Callable
import attr
import numpy as np
......@@ -8,9 +9,10 @@ import torch
from sklearn.utils import shuffle
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
from config import NeuralNetworkConfig, RootConfig
from src.data.loader import np_arrays_from_scored_csv
from data.loader import np_arrays_from_scored_csv
logger = logging.getLogger(__name__)
......@@ -31,15 +33,44 @@ class OrderCounter:
return OrderCounter.order
class SoftClip(nn.Module):
""" SoftClipping activation function
https://arxiv.org/pdf/1810.11509.pdf
"""
def __init__(self, p=50.0):
super().__init__()
self.p = p
def forward(self, x):
first_pow = torch.pow(np.e, torch.mul(x, self.p))
second_pow = torch.pow(np.e, torch.mul(torch.add(x, -1.0), self.p))
first_div = torch.add(first_pow, 1.0)
second_div = torch.add(second_pow, 1.0)
division = torch.div(first_div, second_div)
second_part_log = torch.log(division)
first_part = 1.0 / self.p
if torch.isnan(second_part_log).any():
print("NaN in SoftClip forward pass")  # TODO remove me