Commit 50dfb691 authored by Jiri Borovec

args update

parent 69f975ce
@@ -344,12 +344,12 @@ def export_image(path_out, img, im_name, name_template=SEGM_PATTERN):
if not isinstance(im_name, str):
im_name = name_template.format(im_name)
path_img = os.path.join(path_out, im_name)
im_norm = img / (np.max(img) * 1.)
logger.debug(' .. saving image %s with %s to "%s...%s"', repr(im_norm.shape),
repr(np.unique(im_norm)), path_img[:25], path_img[-25:])
logger.debug(' .. saving image %s with %s to "%s...%s"', repr(img.shape),
repr(np.unique(img)), path_img[:25], path_img[-25:])
if img.ndim == 2 or img.shape[2] <= 3:
im_norm = img / float(np.max(img)) * 255
# io.imsave(path_img, im_norm)
Image.fromarray(im_norm).save(path_img + '.png')
Image.fromarray(im_norm.astype(np.uint8)).save(path_img + '.png')
elif img.ndim == 3:
img_clip = img / float(img.max()) * 255**2
tif = libtiff.TIFF.open(path_img + '.tiff', mode='w')
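For context, a minimal standalone sketch of the save path this hunk settles on, assuming a non-negative input array (save_image_sketch is a hypothetical helper, not part of the module): PIL expects uint8 for PNG, while the TIFF branch stretches towards the 16-bit range (note 255**2 = 65025, just under the uint16 maximum of 65535).

import numpy as np
from PIL import Image

def save_image_sketch(img, path_base):
    if img.ndim == 2 or img.shape[2] <= 3:
        # stretch to the 8-bit range and cast, as PIL expects uint8 for PNG
        im_norm = img / float(np.max(img)) * 255
        Image.fromarray(im_norm.astype(np.uint8)).save(path_base + '.png')
    else:
        # multi-channel stacks: scale towards the 16-bit range for TIFF export
        img_clip = (img / float(img.max()) * 255 ** 2).astype(np.uint16)
        return img_clip  # written out via libtiff in the real function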
@@ -502,14 +502,16 @@ def dataset_load_weights(path_base, name_csv=CSV_NAME_WEIGHTS):
:param name_csv: str, name of file with weights
:return: np.array<nb_imgs, nb_lbs>
"""
pFile = os.path.join(path_base, name_csv)
df = pd.DataFrame().from_csv(pFile)
coding = df['combination'].values.tolist()
logger.debug('encoding of length: %i', len(coding))
encoding = []
for c in coding:
vec = [int(x) for x in c.split(';')]
encoding.append(vec)
path_csv = os.path.join(path_base, name_csv)
df = pd.DataFrame().from_csv(path_csv)
# for the original encoding stored as a string in a single column
if 'combination' in df.columns:
coding = df['combination'].values.tolist()
logger.debug('encoding of length: %i', len(coding))
encoding = np.array([[int(x) for x in c.split(';')] for c in coding])
# the new encoding with pattern names
else:
encoding = df.as_matrix()
return np.array(encoding)
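The updated loader now accepts two CSV layouts: the original one with a single 'combination' column of ';'-separated flags, and a new one with one binary column per pattern name. A minimal re-implementation on current pandas, where DataFrame.from_csv and as_matrix are deprecated in favour of read_csv and .values:

import numpy as np
import pandas as pd

def load_weights_sketch(path_csv):
    df = pd.read_csv(path_csv, index_col=0)
    if 'combination' in df.columns:
        # original layout: every row holds a ';'-separated binary string, e.g. "1;0;1"
        return np.array([[int(x) for x in c.split(';')] for c in df['combination']])
    # new layout: one binary column per pattern name
    return df.values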
@@ -360,7 +360,7 @@ def alpe_update_atlas(imgs, atlas, w_bins, lb_max, gc_coef, gc_reinit, ptn_split
def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=None,
gc_coef=0.0, thr_step_diff=0.0, max_iter=99, gc_reinit=True,
ptn_split=True, overlap_major=False, out_prefix='debug', out_dir=''):
""" the experiments_synthetic_single_run pipeline for block coordinate descent
""" the experiments_synthetic pipeline for block coordinate descent
algorithm with graphcut...
:param imgs: [np.array<w, h>]
@@ -405,6 +405,6 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
logger.info('>> exiting because the atlas diff %f is smaller than %f',
step_diff, thr_step_diff)
break
atlas = sk_image.relabel_sequential(atlas)
atlas = sk_image.relabel_sequential(atlas)[0]
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, atlas) for img in imgs]
return atlas, np.array(w_bins)
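The added [0] matters: skimage's relabel_sequential returns a tuple (relabelled array, forward map, inverse map), not the array alone. A quick check:

import numpy as np
from skimage import segmentation

atlas = np.array([[0, 2], [2, 5]])
relabeled, fwd_map, inv_map = segmentation.relabel_sequential(atlas)
print(relabeled)  # [[0 1] [1 2]] -- labels made consecutive, background stays 0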
@@ -32,9 +32,12 @@ import experiment_apd
logger = logging.getLogger(__name__)
REAL_PARAMS = experiment_apd.REAL_PARAMS
NB_THREADS = experiment_apd.NB_THREADS
PATH_OUTPUT = experiment_apd.PATH_OUTPUT
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE[experiment_apd.SYNTH_DATASET_VERSION]
REAL_PARAMS = experiment_apd.REAL_PARAMS
def test_simple_show_case():
@@ -121,7 +124,7 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
:param i: int, index of the trial
:param init_atlas: np.array<w, h>
:return:
:return: np.array, np.array
"""
logger.info(' -> estimate atlas...')
p = self.params.copy()
@@ -135,10 +138,10 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
os.mkdir(path_out)
try:
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(self.imgs,
init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
gc_coef=p['gc_regul'], max_iter=p['max_iter'],
ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
out_dir=path_out) # , out_prefix=prefix
init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
gc_coef=p['gc_regul'], max_iter=p['max_iter'],
ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
out_dir=path_out) # , out_prefix=prefix
except Exception:
logger.error('FAILED, no atlas estimated!')
logger.error(traceback.format_exc())
@@ -151,7 +154,7 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
""" perform single experiment
:param v: value of the iterated variable
:return:
:return: {str: ...}
"""
logger.info('perform single experiment...')
self.atlas, self.w_bins = self._estimate_atlas(v)
@@ -159,12 +162,12 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
repr(np.unique(self.atlas).tolist()))
logger.debug('weights of size %s and summing %s', repr(self.w_bins.shape),
repr(np.sum(self.w_bins, axis=0)))
self._export_atlas(self.atlas, posix='_{}_{}'.format(self.iter_var_name, v))
self._export_coding(self.w_bins, self._im_names,
posix='_{}_{}'.format(self.iter_var_name, v))
name_posix = '_{}_{}'.format(self.iter_var_name, v)
self._export_atlas(name_posix)
self._export_coding(name_posix)
self.img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
stat = self._compute_statistic_gt(self.atlas, self.img_rct)
stat.update({self.iter_var_name: v})
stat[self.iter_var_name] = v
return stat
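A hedged sketch of what ptn_dict.reconstruct_samples is assumed to do here (the real implementation lives in pattern_disctionary): rebuild each binary image as the union of the atlas patterns whose weight bit is set.

import numpy as np

def reconstruct_samples_sketch(atlas, w_bins):
    # atlas: <w, h> integer labels 0..K, 0 being background
    # w_bins: <nb_imgs, K> binary pattern weights
    imgs_rct = []
    for w in w_bins:
        selected = np.zeros(int(atlas.max()) + 1, dtype=bool)
        selected[1:len(w) + 1] = w.astype(bool)
        imgs_rct.append(selected[atlas])  # look up every pixel's label
    return imgs_rct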
@@ -191,32 +194,31 @@ def experiments_test(dict_params=experiment_apd.SYNTH_PARAMS):
expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_synthetic(dataset=None, nb_jobs=NB_THREADS,
dict_params=experiment_apd.SYNTH_PARAMS,
sub_datasets=experiment_apd.SYNTH_SUB_DATASETS,
ptn_range=experiment_apd.SYNTH_PTN_RANGE,
ds_version=experiment_apd.SYNTH_DATASET_VERSION):
def experiments_synthetic(dataset=None, dict_params=SYNTH_PARAMS,
sub_datasets=SYNTH_SUB_DATASETS, ptn_range=SYNTH_PTN_RANGE):
""" run all experiments
:param dataset: str, name of dataset
:param dict_params: {str: value}
:param sub_datasets: [str]
:param ptn_range: {str: [int]}
:param ptn_range: [int]
"""
logging.basicConfig(level=logging.INFO)
arg_params = experiment_apd.parse_params(dict_params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
nb_jobs = arg_params['nb_jobs']
params = copy.deepcopy(dict_params)
if isinstance(dataset, str):
params.update({'dataset': dataset})
l_params = [params]
l_params = experiment_apd.extend_l_params(l_params, 'sub_dataset', sub_datasets)
l_params = experiment_apd.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = experiment_apd.extend_l_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_l_params(l_params, 'nb_lbs', ptn_range[ds_version])
l_params = experiment_apd.extend_l_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
l_params = experiment_apd.extend_list_params(l_params, 'sub_dataset', sub_datasets)
# l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
# l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_list_params(l_params, 'nb_lbs', ptn_range)
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
logger.debug('list params: %i', len(l_params))
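For readers of this hunk: the assumed behaviour of experiment_apd.extend_list_params (renamed from extend_l_params) is a cartesian expansion of the parameter list, roughly:

def extend_list_params_sketch(l_params, name, values):
    # every existing param dict is cloned once per value -> multiplicative growth
    return [dict(params, **{name: v}) for params in l_params for v in values]

l_params = [{'dataset': 'synth'}]
l_params = extend_list_params_sketch(l_params, 'nb_lbs', [5, 9])
l_params = extend_list_params_sketch(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
print(len(l_params))  # 1 * 2 * 4 = 8 configurations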
@@ -228,21 +230,24 @@ def experiments_synthetic(dataset=None, nb_jobs=NB_THREADS,
exp.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_real(dict_data, dict_params=REAL_PARAMS, nb_jobs=NB_THREADS):
def experiments_real(dict_params=REAL_PARAMS):
""" run all experiments
:param dict_params: {str: value}
"""
logging.basicConfig(level=logging.INFO)
dict_params.update(dict_data)
arg_params = experiment_apd.parse_params(dict_params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
nb_jobs = arg_params['nb_jobs']
dict_params.update(arg_params)
l_params = [copy.deepcopy(dict_params)]
l_params = experiment_apd.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = experiment_apd.extend_l_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_l_params(l_params, 'gc_regul',
[0., 1e-3, 1e-2, 1e-1])
# l_params = experiment_apd.extend_l_params(l_params, 'nb_lbs',
# l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
# l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul',
[0., 1e-9, 1e-6, 1e-3, 1e-2, 1e-1])
# l_params = experiment_apd.extend_list_params(l_params, 'nb_lbs',
# [5, 9, 12, 15, 20, 25, 30, 40])
logger.debug('list params: %i', len(l_params))
@@ -256,37 +261,20 @@ def experiments_real(dict_data, dict_params=REAL_PARAMS, nb_jobs=NB_THREADS):
iter_vals=[9, 12, 15, 20, 25, 30])
def main_real(nb_jobs=NB_THREADS):
""" main entry point for real
:param nb_jobs: int
"""
# experiments_real(nb_jobs=1)
dict_data = experiment_apd.parse_params(REAL_PARAMS)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in dict_data.iteritems()]))
experiments_real(dict_data, nb_jobs=nb_jobs)
# datasets = ['type_{}_segm_reg_binary'.format(i) for i in range(1, 5)]
# for name in datasets:
# experiments_real(dataset=name, nb_jobs=nb_jobs)
def main():
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger.info('running...')
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
# experiments_test()
# plt.show()
experiments_synthetic()
# main_real(nb_jobs=NB_THREADS)
experiments_real()
logger.info('DONE')
# plt.show()
if __name__ == "__main__":
@@ -4,10 +4,15 @@ run experiments with State-of-the-art methods
Example run:
>> nohup python experiments_sta.py > ~/Medical-temp/experiments_APD-sta/nohup.log &
>> python run_experiment_apd_sta.py \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta \
--dataset atomicPatternDictionary_v1
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import copy
import os
import sys
import logging
# to suppress all visualisations, this has to be at the beginning
@@ -15,11 +20,19 @@ import matplotlib
matplotlib.use('Agg')
import numpy as np
from sklearn import decomposition
from skimage import segmentation
sys.path.append(os.path.abspath(os.path.join('..', '..')))  # add the project root to the path
import experiment_apd
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weight
logger = logging.getLogger(__name__)
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE[experiment_apd.SYNTH_DATASET_VERSION]
class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
"""
@@ -65,6 +78,7 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
atlas_sum = np.sum(np.abs(atlas_ptns), axis=0)
# filter small values
atlas[atlas_sum < 1e-3] = 0
assert atlas.shape == atlas_ptns[0].shape
return atlas
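The new assert pins down the shape contract. For orientation, a hedged sketch of the argmax-based atlas estimation this class relies on, assuming atlas_ptns stacks one component image per pattern:

import numpy as np

def estim_atlas_as_argmax_sketch(atlas_ptns, thr=1e-3):
    ptns = np.abs(np.array(atlas_ptns))    # <nb_ptns, w, h>
    atlas = np.argmax(ptns, axis=0) + 1    # pattern labels start at 1
    atlas[np.sum(ptns, axis=0) < thr] = 0  # background where all responses vanish
    assert atlas.shape == atlas_ptns[0].shape
    return atlas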
def estim_atlas_as_unique_sum(self, atlas_ptns):
@@ -86,7 +100,7 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
"""
atlas = self.estim_atlas_as_argmax(atlas_ptns)
# atlas = self.estim_atlas_as_unique_sum(atlas_ptns)
return atlas
self.atlas = segmentation.relabel_sequential(atlas)[0]
def _binarize_img_reconstruction(self, img_rct, thr=0.5):
""" binarise the reconstructed images to be sure again binary
@@ -107,13 +121,18 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
:return:
"""
self.params[self.iter_var_name] = v
name_posix = '_{}_{}'.format(self.iter_var_name, v)
imgs_vec = np.array(map(np.ravel, self.imgs))
atlas_ptns, img_rct = self._perform_linear_combination(imgs_vec)
self.atlas = self._convert_patterns_to_atlas(atlas_ptns)
self._export_atlas(self.atlas, '_{}_{}'.format(self.iter_var_name, v))
img_rct = self._binarize_img_reconstruction(img_rct)
stat = self._compute_statistic_gt(self.atlas, img_rct)
stat.update({self.iter_var_name: v})
self._convert_patterns_to_atlas(atlas_ptns)
self._export_atlas(name_posix)
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, self.atlas)
for img in self.imgs]
self.w_bins = np.array(w_bins)
self._export_coding(name_posix)
img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
stat = self._compute_statistic_gt(img_rct)
stat[self.iter_var_name] = v
return stat
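_perform_once now derives the binary weights from the estimated atlas instead of the raw components. The assumed rule behind ptn_weight.weights_image_atlas_overlap_major is a majority-overlap vote per pattern:

import numpy as np

def weights_overlap_major_sketch(img, atlas):
    # weight 1 iff the majority of a pattern's atlas pixels are active in img
    weights = []
    for lb in range(1, int(atlas.max()) + 1):
        mask = (atlas == lb)
        active = img[mask].mean() if mask.any() else 0.
        weights.append(1 if active > 0.5 else 0)
    return weights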
@@ -155,95 +174,60 @@ class ExperimentDictLearn(ExperimentLinearComb):
self.components = self.estimator.components_
def experiments_test(dict_params=experiment_apd.SYNTH_PARAMS):
METHODS = {'PCA': ExperimentSparsePCA,
'ICA': ExperimentFastICA,
'DL': ExperimentDictLearn}
def experiments_test(params=SYNTH_PARAMS):
""" simple test of the experiments
:param params: {str: value}
"""
logging.basicConfig(level=logging.DEBUG)
params = copy.deepcopy(dict_params)
# params['path_out'] += '-sta'
params['nb_runs'] = 3
params['nb_runs'] = 2
l_cls_exp = [ExperimentFastICA, ExperimentSparsePCA, ExperimentDictLearn]
for cls_expt in l_cls_exp:
for n, cls_expt in METHODS.iteritems():
logging.info('testing %s by %s', n, cls_expt.__name__)
expt = cls_expt(params)
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
def parameters_sta(dataset, dict_params=experiment_apd.SYNTH_PARAMS,
sub_dataset=experiment_apd.SYNTH_SUB_DATASETS):
""" set up the parameters
:param dataset: str
:param dict_params: {str: value}
:param sub_dataset: [str]
:return:
"""
params = dict_params.copy()
if isinstance(dataset, str):
params.update({'dataset': dataset})
params['nb_runs'] = 9
params['max_iter'] = 999
# params['path_out'] += '-sta'
l_params = [params]
l_params = experiment_apd.extend_l_params(l_params, 'sub_dataset', sub_dataset)
return l_params
def experiments_synthetic_single_run(dataset=None,
ptn_range=experiment_apd.SYNTH_PTN_RANGE,
ds_version=experiment_apd.SYNTH_DATASET_VERSION):
""" run all experiments
:param dataset: str, name of dataset
:param ptn_range: {str: [int]}
:param ds_version: str, version of dataset
"""
logging.basicConfig(level=logging.INFO)
l_params = parameters_sta(dataset)
l_cls_exp = [ExperimentFastICA, ExperimentSparsePCA, ExperimentDictLearn]
for params in l_params:
for cls_expt in l_cls_exp:
expt = cls_expt(params)
expt.run(iter_var='nb_lbs', iter_vals=ptn_range[ds_version])
def experiments_synthetic_multi_run(dataset=None,
ptn_range=experiment_apd.SYNTH_PTN_RANGE,
ds_version=experiment_apd.SYNTH_DATASET_VERSION):
def experiments_synthetic(dict_params=SYNTH_PARAMS, ptn_range=SYNTH_PTN_RANGE):
""" run all experiments
:param dict_params: {str: value}
:param ptn_range: {str: [int]}
:param ptn_range: [int]
"""
logging.basicConfig(level=logging.INFO)
l_params = parameters_sta(dataset)
l_params = experiment_apd.extend_l_params(l_params, 'nb_lbs', ptn_range[ds_version])
l_cls_exp = [ExperimentFastICA, ExperimentSparsePCA, ExperimentDictLearn]
for params in l_params:
for cls_expt in l_cls_exp:
arg_params = experiment_apd.parse_params(dict_params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
dict_params.update(arg_params)
dict_params['max_iter'] = 9999
l_params = [dict_params]
l_params = experiment_apd.extend_list_params(l_params, 'sub_dataset',
SYNTH_SUB_DATASETS)
if 'methods' not in dict_params:
dict_params['methods'] = METHODS.keys()
for m in dict_params['methods']:
cls_expt = METHODS[m]
for params in l_params:
expt = cls_expt(params)
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
expt.run(iter_var='nb_lbs', iter_vals=ptn_range)
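With the METHODS registry in place, restricting a sweep to a subset of methods is just a 'methods' entry in the params; a hypothetical invocation:

params = dict(SYNTH_PARAMS)
params['methods'] = ['PCA', 'DL']  # keys into METHODS; omit to run all registered methods
experiments_synthetic(dict_params=params)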
def main():
""" main_real entry point """
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger.info('running...')
experiments_test()
experiments_synthetic_single_run()
# experiments_test()
# experiments_synthetic_multi_run()
experiments_synthetic()
logger.info('DONE')
# plt.show()