Commit 6f452843 by Jiri Borovec

add logging & fix

parent 9c7d76df
......@@ -304,9 +304,9 @@ def dataset_binary_combine_patterns(im_ptns, out_dir, nb_samples=NB_SAMPLES,
tqdm_bar.update(1)
mproc_pool.close()
mproc_pool.join()
df_weights.columns = ['name'] + [COLUMN_NAME.format(i + 1)
for i in range(len(df_weights.columns) - 1)]
df_weights = df_weights.set_index('name')
df_weights.columns = ['image'] + [COLUMN_NAME.format(i + 1)
for i in range(len(df_weights.columns) - 1)]
df_weights = df_weights.set_index('image')
logger.debug(df_weights.head())
return im_spls, df_weights
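As a side note, a minimal standalone sketch of the column-renaming pattern above, assuming COLUMN_NAME is a format string such as 'ptn {}' and using made-up data:

import pandas as pd

COLUMN_NAME = 'ptn {}'  # assumed format string, not taken from this commit

df_weights = pd.DataFrame([['img_01', 1, 0, 1],
                           ['img_02', 0, 1, 1]])
# the first column carries the image name, the remaining ones per-pattern weights
df_weights.columns = ['image'] + [COLUMN_NAME.format(i + 1)
                                  for i in range(len(df_weights.columns) - 1)]
df_weights = df_weights.set_index('image')
print(df_weights.head())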
......
......@@ -30,8 +30,6 @@ import dataset_utils as gen_data
import pattern_disctionary as ptn_dict
import src.own_utils.tool_experiments as tl_expt
logger = logging.getLogger(__name__)
def _reduce_method(m):
# REQUIRED FOR MPROC POOL
......@@ -44,25 +42,17 @@ def _reduce_method(m):
copy_reg.pickle(types.MethodType, _reduce_method)
b_cmp = True
if b_cmp:
PATH_DATA_SYNTH = '/datagrid/Medical/microscopy/drosophila/synthetic_data'
PATH_DATA_REAL = '/datagrid/Medical/microscopy/drosophila/TEMPORARY'
PATH_OUTPUT = '/datagrid/Medical/microscopy/drosophila/TEMPORARY'
else:
PATH_DATA_SYNTH = '/home/b_jirka/TEMP/APD_synthetic_data'
PATH_DATA_REAL = ''
PATH_OUTPUT = '/home/b_jirka/TEMP'
NB_THREADS = int(mproc.cpu_count() * .9)
PATH_RESULTS = os.path.join(PATH_OUTPUT, 'experiments_APD_temp')
PATH_DATA_SYNTH = '/datagrid/Medical/microscopy/drosophila/synthetic_data'
PATH_DATA_REAL = '/datagrid/Medical/microscopy/drosophila/TEMPORARY'
PATH_RESULTS = '/datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD_temp'
DEFAULT_PARAMS = {
'computer': os.uname(),
'nb_samples': None, # msc. rnd
'init_tp': 'msc', # msc. rnd
'max_iter': 39,
'nb_samples': None,
'init_tp': 'rnd', # msc. rnd
'max_iter': 100,
'gc_regul': 1e-2,
'nb_labels': 12,
'nb_labels': 15,
'nb_runs': 25, # 500
'gc_reinit': True,
'ptn_split': True,
......@@ -76,14 +66,21 @@ SYNTH_SUB_DATASETS = ['datasetBinary_raw',
'datasetBinary_deform',
'datasetBinary_defNoise']
SYNTH_PARAMS = DEFAULT_PARAMS.copy()
SYNTH_PARAMS.update({'data_type': 'synthetic',
'path_in': SYNTH_PATH_APD,
'dataset': SYNTH_SUB_DATASETS[0],
'path_out': PATH_RESULTS})
SYNTH_PTN_RANGE = {'atomicPatternDictionary_v0': range(2, 14, 1),
'atomicPatternDictionary_v1': range(5, 20, 1),
'atomicPatternDictionary_v2': range(10, 40, 2),
'atomicPatternDictionary_v3': range(10, 40, 2),}
SYNTH_PARAMS.update({
'data_type': 'synthetic',
'path_in': SYNTH_PATH_APD,
'dataset': SYNTH_SUB_DATASETS[0],
'path_out': PATH_RESULTS,
})
SYNTH_PTN_RANGE = {
'atomicPatternDictionary_00': range(5),
'atomicPatternDictionary_v0': range(2, 14, 1),
'atomicPatternDictionary_v1': range(5, 20, 1),
'atomicPatternDictionary_v2': range(10, 40, 2),
'atomicPatternDictionary_v3': range(10, 40, 2),
'atomicPatternDictionary3D_v0': range(2, 14, 1),
'atomicPatternDictionary3D_v1': range(6, 30, 2),
}
# SYNTH_RESULTS_NAME = 'experiments_APD'
REAL_DATASET_NAME = '1000_images_improved_binary'
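A rough usage sketch for the SYNTH_PTN_RANGE table above, assuming the keys mirror the dataset folder names so the label range can be picked from the basename of the input path (the path below is only an example):

import os

# excerpt of the table above, just enough for the lookup
SYNTH_PTN_RANGE = {'atomicPatternDictionary_v1': range(5, 20, 1)}
path_in = '/datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v1'
ptn_range = SYNTH_PTN_RANGE[os.path.basename(path_in)]
assert list(ptn_range) == list(range(5, 20, 1))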
......@@ -94,12 +91,13 @@ REAL_SUB_DATASETS = [
'gene_ssmall',
]
REAL_PARAMS = DEFAULT_PARAMS.copy()
REAL_PARAMS.update({'data_type': 'real',
'path_in': os.path.join(PATH_DATA_REAL, REAL_DATASET_NAME),
'dataset': REAL_SUB_DATASETS[0],
'path_out': PATH_RESULTS,
'max_iter': 50,
'nb_runs': 10})
REAL_PARAMS.update({
'data_type': 'real',
'path_in': os.path.join(PATH_DATA_REAL, REAL_DATASET_NAME),
'dataset': REAL_SUB_DATASETS[0],
'path_out': PATH_RESULTS,
'max_iter': 50,
'nb_runs': 10})
# PATH_OUTPUT = os.path.join('..','..','results')
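The parameter sets above are built by copying the defaults and overriding a few keys; dict.copy() is shallow, which is safe here only because the values are scalars and strings. A tiny sketch of the pattern with hypothetical names:

DEFAULTS = {'max_iter': 100, 'nb_runs': 25}

derived = DEFAULTS.copy()            # shallow copy of the default parameters
derived.update({'max_iter': 50,      # override only what differs
                'nb_runs': 10})
assert DEFAULTS['max_iter'] == 100   # the defaults stay untouched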
......@@ -173,7 +171,7 @@ class ExperimentAPD(tl_expt.Experiment):
# self.params.export_as(self.path_stat)
str_params = 'PARAMETERS: \n' + '\n'.join(['"{}": \t {}'.format(k, v)
for k, v in self.params.iteritems()])
logger.info(str_params)
logging.info(str_params)
with open(self.path_stat, 'w') as fp:
fp.write(str_params)
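This commit switches the calls from a module-level logger to the root logger via logging.*; a minimal sketch of both patterns, assuming a basicConfig set up in main():

import logging

logging.basicConfig(level=logging.INFO)

logger = logging.getLogger(__name__)          # per-module logger (the removed pattern)
logger.info('message via the module logger')
logging.info('message via the root logger')   # the pattern used after this commit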
......@@ -191,7 +189,7 @@ class ExperimentAPD(tl_expt.Experiment):
:param gt: bool
"""
logger.info('loading required data')
logging.info('loading required data')
if gt:
self._load_data_ground_truth()
self._load_images()
......@@ -210,7 +208,7 @@ class ExperimentAPD(tl_expt.Experiment):
:param iter_var: str name of variable to be iterated in the experiment
:param iter_vals: [] list of possible values for :param iter_var:
"""
logger.info('perform the complete experiment')
logging.info('perform the complete experiment')
self.iter_var_name = iter_var
self.iter_values = iter_vals
super(ExperimentAPD, self).run(gt)
......@@ -222,12 +220,12 @@ class ExperimentAPD(tl_expt.Experiment):
def _perform_sequence(self):
""" iteratively change a single experiment parameter with the same data
"""
logger.info('perform_sequence in single thread')
logging.info('perform_sequence in single thread')
self.l_stat = []
tqdm_bar = tqdm.tqdm(total=len(self.iter_values))
for v in self.iter_values:
self.params[self.iter_var_name] = v
logger.debug(' -> set iterable "%s" on %s', self.iter_var_name,
logging.debug(' -> set iterable "%s" on %s', self.iter_var_name,
repr(self.params[self.iter_var_name]))
t = time.time()
stat = self._perform_once(v)
......@@ -267,7 +265,7 @@ class ExperimentAPD(tl_expt.Experiment):
if not hasattr(self, '_im_names'):
self._im_names = map(str, range(self.w_bins.shape[0]))
df = pd.DataFrame(data=self.w_bins, index=self._im_names[:len(self.w_bins)])
df.columns = ['ptn {}'.format(lb + 1) for lb in df.columns]
df.columns = ['ptn {:02d}'.format(lb + 1) for lb in df.columns]
df.index.name = 'image'
df.to_csv(path_csv)
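Zero-padding the pattern index ('ptn {:02d}') keeps the exported CSV columns in lexicographic order once there are ten or more patterns; a small sketch with made-up weights:

import numpy as np
import pandas as pd

w_bins = np.array([[1, 0, 1], [0, 1, 1]])   # hypothetical binary weights
df = pd.DataFrame(data=w_bins, index=['img_01', 'img_02'])
df.columns = ['ptn {:02d}'.format(lb + 1) for lb in df.columns]
df.index.name = 'image'
# columns are now ['ptn 01', 'ptn 02', 'ptn 03'], which sort correctly as strings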
......@@ -279,12 +277,12 @@ class ExperimentAPD(tl_expt.Experiment):
:return: {str: float, }
"""
stat = {}
logger.debug('compute static - %s', hasattr(self, 'gt_atlas'))
logging.debug('compute static - %s', hasattr(self, 'gt_atlas'))
if hasattr(self, 'gt_atlas') and hasattr(self, 'atlas'):
if self.gt_atlas.shape == self.atlas.shape:
stat['atlas_ARS'] = metrics.adjusted_rand_score(self.gt_atlas.ravel(),
self.atlas.ravel())
logger.debug('compute reconstruction - %s', hasattr(self, 'gt_img_rct'))
logging.debug('compute reconstruction - %s', hasattr(self, 'gt_img_rct'))
# error estimation from original reconstruction
if hasattr(self, 'gt_img_rct') and img_rct is not None:
# img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
......@@ -302,17 +300,17 @@ class ExperimentAPD(tl_expt.Experiment):
if self.iter_var_name in stat:
self.df_stat = self.df_stat.set_index(self.iter_var_name)
path_csv = os.path.join(self.params.get('path_exp'), self.RESULTS_CSV)
logger.debug('save results: "%s"', path_csv)
logging.debug('save results: "%s"', path_csv)
self.df_stat.to_csv(path_csv)
def _summarise(self):
""" summarise and export experiment results """
logger.info('summarise the experiment')
logging.info('summarise the experiment')
if hasattr(self, 'df_stat') and not self.df_stat.empty:
with open(self.path_stat, 'a') as fp:
fp.write('\n' * 3 + 'RESULTS: \n' + '=' * 9)
fp.write('\n{}'.format(self.df_stat.describe()))
logger.debug('statistic: \n%s', repr(self.df_stat.describe()))
logging.debug('statistic: \n%s', repr(self.df_stat.describe()))
class ExperimentAPD_mp(ExperimentAPD):
......@@ -338,7 +336,7 @@ class ExperimentAPD_mp(ExperimentAPD):
def _warp_perform_once(self, v):
self.params[self.iter_var_name] = v
logger.debug(' -> set iterable "%s" on %s', self.iter_var_name,
logging.debug(' -> set iterable "%s" on %s', self.iter_var_name,
repr(self.params[self.iter_var_name]))
t = time.time()
# stat = super(ExperimentAPD_mp, self)._perform_once(v)
......@@ -361,7 +359,7 @@ class ExperimentAPD_mp(ExperimentAPD):
def _perform_sequence(self):
""" perform sequence in multiprocessing pool """
logger.debug('perform_sequence in %i threads', self.nb_jobs)
logging.debug('perform_sequence in %i threads', self.nb_jobs)
# ISSUE with passing large data to processes, so the images are saved
# and loaded again in each particular process
# p_imgs = os.path.join(self.params.get('path_exp'), 'input_images.npz')
......
......@@ -5,6 +5,26 @@
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v0 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
#
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v1 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
#
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v2 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
#
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v3 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
#
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary3D_v0 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-3D-sta
#
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary3D_v1 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-3D-sta
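The commands above differ only in the dataset folder; a hedged sketch that drives the same script in a loop (paths copied from the comments, everything else illustrative):

import subprocess

PATH_SYNTH = '/datagrid/Medical/microscopy/drosophila/synthetic_data'
PATH_OUT = '/datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta'
VERSIONS = ['atomicPatternDictionary_v0', 'atomicPatternDictionary_v1',
            'atomicPatternDictionary_v2', 'atomicPatternDictionary_v3']

for version in VERSIONS:
    subprocess.call(['python', 'run_experiment_apd_sta.py',
                     '-in', PATH_SYNTH + '/' + version,
                     '-out', PATH_OUT])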
# OUR method
......
......@@ -83,8 +83,9 @@ def weights_image_atlas_overlap_threshold(img, atlas, thr=0.5):
# simple weight
labels = np.unique(atlas).tolist()
# logger.debug('weights image by atlas with labels: {}'.format(lbs))
if 0 in labels: labels.remove(0)
weight = [0] * np.max(labels)
if 0 in labels:
labels.remove(0)
weight = [0] * np.max(atlas)
for lb in labels:
equal = np.sum(img[atlas == lb])
total = np.sum(atlas == lb)
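For readers without the full file, a self-contained sketch of what this weighting presumably does — per atlas label, take the fraction of its pixels that are active in the binary image and threshold it (an assumption-based reconstruction, not the exact repository code):

import numpy as np

def weights_image_atlas_overlap_threshold(img, atlas, thr=0.5):
    # size the weight vector by the largest atlas label, so missing labels keep weight 0
    labels = np.unique(atlas).tolist()
    if 0 in labels:
        labels.remove(0)                      # label 0 is treated as background
    weight = [0] * np.max(atlas)
    for lb in labels:
        equal = np.sum(img[atlas == lb])      # active pixels of the image inside label lb
        total = np.sum(atlas == lb)           # size of label lb in the atlas
        if total > 0 and float(equal) / total > thr:
            weight[lb - 1] = 1
    return weight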
......
......@@ -18,6 +18,7 @@ import time
import traceback
import logging
import copy
import gc
import matplotlib
matplotlib.use('Agg')
......@@ -33,10 +34,8 @@ import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weigth
import experiment_apd
logger = logging.getLogger(__name__)
NB_THREADS = experiment_apd.NB_THREADS
PATH_OUTPUT = experiment_apd.PATH_OUTPUT
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE
......@@ -64,20 +63,20 @@ def test_simple_show_case():
plt.imshow(img, cmap='gray', interpolation='nearest')
t = time.time()
uc = dl.compute_relative_penaly_images_weights(imgs, np.array(ws))
logger.debug('elapsed TIME: %s', repr(time.time() - t))
logging.debug('elapsed TIME: %s', repr(time.time() - t))
res = dl.estimate_atlas_graphcut_general(imgs, np.array(ws), 0.)
plt.subplot(gs[0, -1]), plt.title('result')
plt.imshow(res, cmap=cm, interpolation='nearest'), plt.colorbar()
uc = uc.reshape(atlas.shape+uc.shape[2:])
# logger.debug(ws)
# logging.debug(ws)
for i in range(uc.shape[2]):
plt.subplot(gs[1, i])
plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.title('cost lb #{}'.format(i)), plt.colorbar()
# logger.debug(uc)
# logging.debug(uc)
def experiment_pipeline_alpe_showcase(path_out=PATH_OUTPUT):
def experiment_pipeline_alpe_showcase(path_out):
""" an simple show case to prove that the particular steps are computed
:param path_in: str
......@@ -129,10 +128,10 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
:param init_atlas: np.array<w, h>
:return: np.array, np.array
"""
logger.info(' -> estimate atlas...')
logging.info(' -> estimate atlas...')
p = self.params.copy()
p[self.iter_var_name] = v
logger.debug('PARAMS: %s', repr(p))
logging.debug('PARAMS: %s', repr(p))
init_atlas = self._init_atlas(p['nb_labels'], p['init_tp'])
# prefix = 'expt_{}'.format(p['init_tp'])
path_out = os.path.join(p['path_exp'],
......@@ -149,8 +148,8 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
out_dir=path_out) # , out_prefix=prefix
except:
logger.error('FAILED, no atlas estimated!')
logger.error(traceback.format_exc())
logging.error('FAILED, no atlas estimated!')
logging.error(traceback.format_exc())
atlas = np.zeros_like(self.imgs[0])
w_bins = np.zeros((len(self.imgs), 0))
assert atlas.max() == w_bins.shape[1]
......@@ -163,11 +162,11 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
:param v: value
:return: {str: ...}
"""
logger.info('perform single experiment...')
logging.info('perform single experiment...')
self._estimate_atlas(v)
logger.debug('atlas of size %s and labels %s', repr(self.atlas.shape),
logging.debug('atlas of size %s and labels %s', repr(self.atlas.shape),
repr(np.unique(self.atlas).tolist()))
logger.debug('weights of size %s and summing %s', repr(self.w_bins.shape),
logging.debug('weights of size %s and summing %s', repr(self.w_bins.shape),
repr(np.sum(self.w_bins, axis=0)))
name_posix = '_{}_{}'.format(self.iter_var_name, v)
self._export_atlas(name_posix)
......@@ -192,11 +191,11 @@ def experiments_test(dict_params=experiment_apd.SYNTH_PARAMS):
params = copy.deepcopy(dict_params)
params['nb_runs'] = 3
logger.info('RUN: ExperimentALPE')
logging.info('RUN: ExperimentALPE')
expt = ExperimentALPE(params)
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
logger.info('RUN: ExperimentALPE_mp')
logging.info('RUN: ExperimentALPE_mp')
expt_p = ExperimentALPE_mp(params)
expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
......@@ -219,23 +218,28 @@ def experiments_synthetic(params=SYNTH_PARAMS):
l_params = [params]
l_params = experiment_apd.extend_list_params(l_params, 'dataset', SYNTH_SUB_DATASETS)
l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
# l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
ptn_range = SYNTH_PTN_RANGE[os.path.basename(params['path_in'])]
l_params = experiment_apd.extend_list_params(l_params, 'nb_labels', ptn_range)
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul',
[0., 1e-6, 1e-3, 1e-1, 1e0])
ptn_range = SYNTH_PTN_RANGE[os.path.basename(params['path_in'])]
l_params = experiment_apd.extend_list_params(l_params, 'nb_labels', ptn_range)
logger.debug('list params: %i', len(l_params))
logging.debug('list params: %i', len(l_params))
tqdm_bar = tqdm.tqdm(total=len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(iter_var='case', iter_vals=range(params['nb_runs']))
try:
if nb_jobs > 1:
expt = ExperimentALPE_mp(params, nb_jobs)
else:
expt = ExperimentALPE(params)
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
# exp.run(iter_var='nb_labels', iter_vals=ptn_range)
del expt
except Exception:
logging.exception('experiment failed for this parameter set')
tqdm_bar.update(1)
gc.collect(), time.sleep(1)
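extend_list_params itself is not part of this diff; presumably it expands every parameter dict into one copy per value of the given key, which is why the list length is logged before the loop. A hedged sketch of that behaviour:

import copy

def extend_list_params(list_params, name, values):
    """ expand each params dict into one copy per value of `name`
    (assumed behaviour of the helper in experiment_apd) """
    list_out = []
    for params in list_params:
        for val in values:
            params_new = copy.deepcopy(params)
            params_new[name] = val
            list_out.append(params_new)
    return list_out

l_params = extend_list_params([{'gc_regul': 1e-2}], 'nb_labels', [5, 10, 15])
assert len(l_params) == 3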
def experiments_real(params=REAL_PARAMS):
......@@ -257,23 +261,24 @@ def experiments_real(params=REAL_PARAMS):
[0., 1e-9, 1e-6, 1e-3, 1e-2, 1e-1])
# l_params = experiment_apd.extend_list_params(l_params, 'nb_labels',
# [5, 9, 12, 15, 20, 25, 30, 40])
logger.debug('list params: %i', len(l_params))
logging.debug('list params: %i', len(l_params))
tqdm_bar = tqdm.tqdm(total=len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, nb_jobs)
expt = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
expt = ExperimentALPE(params)
# exp.run(gt=False, iter_var='case', iter_values=range(params['nb_runs']))
exp.run(gt=False, iter_var='nb_labels',
expt.run(gt=False, iter_var='nb_labels',
iter_vals=[9, 12, 15, 20, 25, 30])
tqdm_bar.update(1)
gc.collect(), time.sleep(1)
def main():
logging.basicConfig(level=logging.INFO)
logger.info('running...')
logging.info('running...')
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
......@@ -284,7 +289,7 @@ def main():
# experiments_real()
logger.info('DONE')
logging.info('DONE')
if __name__ == "__main__":
......
......@@ -13,6 +13,8 @@ Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
import os
import sys
import time
import gc
import logging
# to suppress all visu, has to be on the beginning
......@@ -28,7 +30,6 @@ import experiment_apd
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weight
logger = logging.getLogger(__name__)
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
......@@ -56,8 +57,8 @@ class ExperimentLinearCombine(experiment_apd.ExperimentAPD_mp):
:return:
"""
self._estimate_linear_combination(imgs_vec)
logger.debug('fitting parameters: %s', repr(self.estimator.get_params()))
logger.debug('number of iteration: %i', self.estimator.n_iter_)
logging.debug('fitting parameters: %s', repr(self.estimator.get_params()))
logging.debug('number of iteration: %i', self.estimator.n_iter_)
atlas_ptns = self.components.reshape((-1, ) + self.imgs[0].shape)
rct_vec = np.dot(self.fit_result, self.components)
......@@ -226,18 +227,20 @@ def experiments_synthetic(params=SYNTH_PARAMS):
expt = cls_expt(params)
expt.run(iter_var='nb_labels', iter_vals=ptn_range)
tqdm_bar.update(1)
del expt
gc.collect(), time.sleep(1)
def main():
""" main_real entry point """
logging.basicConfig(level=logging.INFO)
logger.info('running...')
logging.info('running...')
# experiments_test()
experiments_synthetic()
logger.info('DONE')
logging.info('DONE')
# plt.show()
......