Commit 6e62a79b authored by Jiri Borovec

update

parent 40b39d27
@@ -210,7 +210,7 @@ def dictionary_generate_atlas(path_out, dir_name=DIR_NAME_DICTIONARY,
     logger.debug(np.unique(atlas))
     export_image(out_dir, atlas, 'atlas')
     # when running in DEBUG, show the atlas and wait until the window is closed
-    if logger.getEffectiveLevel() == logging.DEBUG:
+    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
         logger.debug('labels: %s', repr(np.unique(atlas)))
         if atlas.ndim == 2:
             plt.imshow(atlas)
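The change above matters because a module logger and the root logger can report different effective levels: a logger created via `logging.getLogger(__name__)` only inherits its level from the root until it is set explicitly. A minimal standalone sketch of the distinction (not repo code):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('my_module')

# the module logger merely inherits DEBUG from the root logger
assert logger.getEffectiveLevel() == logging.DEBUG
assert logging.getLogger().getEffectiveLevel() == logging.DEBUG

# after an explicit override the two checks diverge
logger.setLevel(logging.INFO)
assert logger.getEffectiveLevel() == logging.INFO
assert logging.getLogger().getEffectiveLevel() == logging.DEBUG
```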
@@ -49,10 +49,10 @@ PATH_RESULTS = '/datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_AP
 DEFAULT_PARAMS = {
     'computer': os.uname(),
     'nb_samples': None,
-    'init_tp': 'rnd',  # msc. rnd
-    'max_iter': 100,
+    'init_tp': 'msc',  # msc. rnd
+    'max_iter': 150,
     'gc_regul': 1e-2,
-    'nb_labels': 15,
+    'nb_labels': 20,
     'nb_runs': NB_THREADS,  # 500
     'gc_reinit': True,
     'ptn_split': True,
@@ -13,8 +13,6 @@ import numpy as np
 import dataset_utils as data
 import pattern_weights as ptn_weight
-logger = logging.getLogger(__name__)
-
 def initialise_atlas_random(im_size, max_lb):
     """ initialise atlas with random labels
@@ -23,13 +21,13 @@ def initialise_atlas_random(im_size, max_lb):
     :param max_lb: int, number of labels
     :return: np.array<w, h>
     """
-    logger.debug('initialise atlas %s as random labeling', repr(im_size))
+    logging.debug('initialise atlas %s as random labeling', repr(im_size))
     nb_lbs = max_lb + 1
     im = np.random.randint(1, nb_lbs, im_size)
     return np.array(im, dtype=np.int)
-def initialise_atlas_mosaic(im_size, max_lb):
+def initialise_atlas_mosaic(im_size, max_lb, coef=1.):
     """ generate a grid structure and place a label into each rectangle,
     each row contains all labels (permutation)
@@ -37,12 +35,13 @@ def initialise_atlas_mosaic(im_size, max_lb):
     :param max_lb: int, number of labels
     :return: np.array<w, h>
     """
-    logger.debug('initialise atlas %s as grid labeling', repr(im_size))
-    nb_lbs = max_lb
+    logging.debug('initialise atlas %s as grid labeling', repr(im_size))
+    nb_lbs = int(max_lb * coef)
     block = np.ones(np.ceil(im_size / np.array(nb_lbs, dtype=np.float)))
-    logger.debug('block size is %s', repr(block.shape))
+    vec = range(1, nb_lbs + 1) * int(np.ceil(coef))
+    logging.debug('block size is %s', repr(block.shape))
     for l in range(nb_lbs):
-        idx = np.random.permutation(range(1, nb_lbs + 1))
+        idx = np.random.permutation(vec)[:nb_lbs]
         for k in range(nb_lbs):
             b = block.copy() * idx[k]
             if k == 0:
@@ -51,7 +50,7 @@ def initialise_atlas_mosaic(im_size, max_lb):
                 row = np.hstack((row, b))
         if l == 0: mosaic = row
         else: mosaic = np.vstack((mosaic, row))
-    logger.debug('generated mosaic %s with labeling %s',
+    logging.debug('generated mosaic %s with labeling %s',
                  repr(mosaic.shape), repr(np.unique(mosaic).tolist()))
     im = mosaic[:im_size[0], :im_size[1]]
     return np.array(im, dtype=np.int)
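For readers skimming the diff, a simplified self-contained sketch of what the updated mosaic initialisation does (the function name and shapes here are illustrative, not the repo's): it tiles the image with a grid of blocks and fills each row with a random permutation of labels, where `coef > 1` draws from a larger label pool.

```python
import numpy as np

def mosaic_sketch(im_size, max_lb, coef=1.):
    # number of labels, enlarged by the coefficient
    nb_lbs = int(max_lb * coef)
    # one grid block, sized so nb_lbs blocks span the image
    block = np.ones(np.ceil(np.array(im_size, dtype=float) / nb_lbs).astype(int))
    # label pool repeated enough times to permute from
    vec = list(range(1, nb_lbs + 1)) * int(np.ceil(coef))
    rows = []
    for _ in range(nb_lbs):
        idx = np.random.permutation(vec)[:nb_lbs]
        rows.append(np.hstack([block * i for i in idx]))
    mosaic = np.vstack(rows)
    # crop the tiling back to the requested image size
    return mosaic[:im_size[0], :im_size[1]].astype(int)

atlas = mosaic_sketch((64, 64), max_lb=4, coef=1.5)  # labels drawn from 1..6
```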
@@ -64,7 +63,7 @@ def initialise_atlas_deform_original(atlas):
     :param atlas: np.array<w, h>
     :return: np.array<w, h>
     """
-    logger.debug('initialise atlas by deforming original one')
+    logging.debug('initialise atlas by deforming original one')
     res = data.image_deform_elastic(atlas)
     return np.array(res, dtype=np.int)
@@ -136,10 +135,10 @@ def insert_new_pattern(imgs, imgs_rc, atlas, lb):
         diff = np.sum((im - im_rc) > 0)
         diffs.append(diff)
     im_ptn = prototype_new_pattern(imgs, imgs_rc, diffs, atlas)
-    # logger.debug('new im_ptn: {}'.format(np.sum(im_ptn) / np.prod(im_ptn.shape)))
+    # logging.debug('new im_ptn: {}'.format(np.sum(im_ptn) / np.prod(im_ptn.shape)))
     # plt.imshow(im_ptn), plt.title('im_ptn'), plt.show()
     atlas[im_ptn == True] = lb
-    logger.debug('area of new pattern is %i', np.sum(atlas == lb))
+    logging.debug('area of new pattern is %i', np.sum(atlas == lb))
     return atlas
@@ -156,27 +155,27 @@ def reinit_atlas_likely_patterns(imgs, w_bins, atlas, label_max=None):
     if label_max is None:
         label_max = max(np.max(atlas), w_bins.shape[1])
     else:
-        logger.debug('compare w_bin %s to max %i', repr(w_bins.shape), label_max)
+        logging.debug('compare w_bin %s to max %i', repr(w_bins.shape), label_max)
         for i in range(w_bins.shape[1], label_max):
-            logger.debug('adding disappeared weight column %i', i)
+            logging.debug('adding disappeared weight column %i', i)
             w_bins = np.append(w_bins, np.zeros((w_bins.shape[0], 1)), axis=1)
     w_bin_ext = np.append(np.zeros((w_bins.shape[0], 1)), w_bins, axis=1)
-    logger.debug('IN > sum over weights: %s', repr(np.sum(w_bin_ext, axis=0)))
+    logging.debug('IN > sum over weights: %s', repr(np.sum(w_bin_ext, axis=0)))
     # shift by one because the indexes do not cover 0 (background)
-    logger.debug('total nb labels: %i', label_max)
+    logging.debug('total nb labels: %i', label_max)
     for lb in range(1, label_max + 1):
         w_index = lb - 1
         w_sum = np.sum(w_bins[:, w_index])
-        logger.debug('reinit lb: %i with weight sum %i', lb, w_sum)
+        logging.debug('reinit lb: %i with weight sum %i', lb, w_sum)
         if w_sum > 0:
             continue
         imgs_rc = reconstruct_samples(atlas, w_bins)
         atlas = insert_new_pattern(imgs, imgs_rc, atlas, lb)
-        logger.debug('w_bins before: %i', np.sum(w_bins[:, w_index]))
+        logging.debug('w_bins before: %i', np.sum(w_bins[:, w_index]))
         lim_repopulate = 100. / np.prod(atlas.shape)
         w_bins[:, w_index] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
                                             atlas, lb, lim_repopulate)
-        logger.debug('w_bins after: %i', np.sum(w_bins[:, w_index]))
+        logging.debug('w_bins after: %i', np.sum(w_bins[:, w_index]))
     return atlas, w_bins
@@ -190,17 +189,17 @@ def atlas_split_indep_ptn(atlas, lb_max):
     patterns = []
     for lb in np.unique(atlas):
         labeled, nb_objects = ndimage.label(atlas == lb)
-        logger.debug('for label %i detected #%i', lb, nb_objects)
+        logging.debug('for label %i detected #%i', lb, nb_objects)
         ptn = [(labeled == j) for j in np.unique(labeled)]
         # skip the largest component, assumed to be background
         patterns += sorted(ptn, key=lambda x: np.sum(x), reverse=True)[1:]
     patterns = sorted(patterns, key=lambda x: np.sum(x), reverse=True)
-    logger.debug('list of all areas %s', repr([np.sum(p) for p in patterns]))
+    logging.debug('list of all areas %s', repr([np.sum(p) for p in patterns]))
     atlas_new = np.zeros(atlas.shape, dtype=np.int)
     # take just the lb_max largest components
     for i, ptn in enumerate(patterns[:lb_max]):
         lb = i + 1
-        logger.debug('pattern #%i area %i', lb, np.sum(ptn))
+        logging.debug('pattern #%i area %i', lb, np.sum(ptn))
         # plt.subplot(1, lb_max, l), plt.imshow(ptn), plt.colorbar()
         atlas_new[ptn] = lb
@@ -208,5 +207,5 @@ def atlas_split_indep_ptn(atlas, lb_max):
     # plt.subplot(121), plt.imshow(atlas), plt.colorbar()
     # plt.subplot(122), plt.imshow(atlas_new), plt.colorbar()
     # plt.show()
-    logger.debug('atlas unique %s', repr(np.unique(atlas_new)))
+    logging.debug('atlas unique %s', repr(np.unique(atlas_new)))
     return atlas_new
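The split above leans on scipy.ndimage.label, which assigns a distinct integer id to every connected component of a binary mask; a small standalone check of that behaviour:

```python
import numpy as np
from scipy import ndimage

mask = np.array([[1, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 1]])
labeled, nb_objects = ndimage.label(mask)
print(nb_objects)          # 2 -- two 4-connected components
print(np.unique(labeled))  # [0 1 2] -- 0 stays background
```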
@@ -7,8 +7,6 @@ import logging
 import numpy as np
-logger = logging.getLogger(__name__)
-
 def initialise_weights_random(nb_imgs, nb_lbs, ratio_sel=0.2):
     """
@@ -18,7 +16,7 @@ def initialise_weights_random(nb_imgs, nb_lbs, ratio_sel=0.2):
        1 means all and 0 means none
     :return: np.array<nb_imgs, nb_lbs>
     """
-    logger.debug('initialise weights for %i images and %i labels '
+    logging.debug('initialise weights for %i images and %i labels '
                  'as random selection', nb_imgs, nb_lbs)
     prob = np.random.random((nb_imgs, nb_lbs))
     weights = np.zeros_like(prob)
@@ -32,7 +30,7 @@ def convert_weights_binary2indexes(weights):
     :param weights: np.array<nb_imgs, nb_lbs>
     :return: [[int, ...]] * nb_imgs
     """
-    logger.debug('convert binary weights %s to list of indexes with True',
+    logging.debug('convert binary weights %s to list of indexes with True',
                  repr(weights.shape))
     # if type(weights) is np.ndarray: weights = weights.tolist()
     w_index = [None] * weights.shape[0]
@@ -53,7 +51,7 @@ def weights_image_atlas_overlap_major(img, atlas):
     :param atlas: np.array<w, h>
     :return: [int] * nb_lbs of values {0, 1}
     """
-    # logger.debug('weight the input image according to the given atlas')
+    # logging.debug('weight the input image according to the given atlas')
     weights = weights_image_atlas_overlap_threshold(img, atlas, 0.5)
     return weights
@@ -64,7 +62,7 @@ def weights_image_atlas_overlap_partial(img, atlas):
     :param atlas: np.array<w, h>
     :return: [int] * nb_lbs of values {0, 1}
     """
-    # logger.debug('weight the input image according to the given atlas')
+    # logging.debug('weight the input image according to the given atlas')
     labels = np.unique(atlas).tolist()
     weights = weights_image_atlas_overlap_threshold(img, atlas, (1. / len(labels)))
     return weights
@@ -79,10 +77,10 @@ def weights_image_atlas_overlap_threshold(img, atlas, thr=0.5):
     :param thr: float, the ratio between overlapping and non-overlapping pixels
     :return: [int] * nb_lbs of values {0, 1}
     """
-    # logger.debug('weight the input image according to the given atlas')
+    # logging.debug('weight the input image according to the given atlas')
     # simple weight
     labels = np.unique(atlas).tolist()
-    # logger.debug('weights image by atlas with labels: {}'.format(lbs))
+    # logging.debug('weights image by atlas with labels: {}'.format(lbs))
     if 0 in labels:
         labels.remove(0)
     weight = [0] * np.max(atlas)
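The three weighting variants above differ only in the threshold they pass. A hedged sketch of the thresholded overlap itself (illustrative names, not the repo function): for each label, the weight is 1 when the fraction of that label's atlas pixels which are also foreground in the image exceeds `thr`.

```python
import numpy as np

def overlap_weights_sketch(img, atlas, thr=0.5):
    weights = [0] * int(np.max(atlas))
    for lb in range(1, int(np.max(atlas)) + 1):
        mask = (atlas == lb)
        if not mask.any():
            continue
        ratio = np.sum(img[mask] > 0) / float(mask.sum())
        weights[lb - 1] = int(ratio > thr)
    return weights
```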
@@ -206,9 +206,9 @@ def perform_reconstruction_mproc(dict_params, name_csv, df_encode, img_atlas,
                                  nb_jobs=NB_THREADS):
     path_out = os.path.join(dict_params['path_exp'],
                             name_csv.replace(PREFIX_ENCODE, PREFIX_RECONST))
-    if not os.path.exists(path_out):
-        os.mkdir(path_out)
     if VISUAL:
+        if not os.path.exists(path_out):
+            os.mkdir(path_out)
         export_fig_atlas(path_out, 'atlas', img_atlas)
     list_patterns = [col for col in df_encode.columns if col.startswith('ptn')]
     logger.debug('list of pattern names: %s', repr(list_patterns))
@@ -21,7 +21,7 @@ import logging
 import matplotlib
 matplotlib.use('Agg')
 import numpy as np
-from sklearn import decomposition
+from sklearn.decomposition import SparsePCA, FastICA, DictionaryLearning
 from skimage import segmentation
 import tqdm
@@ -147,8 +147,10 @@ class ExperimentFastICA(ExperimentLinearCombine):
     """
     def _estimate_linear_combination(self, imgs_vec):
-        self.estimator = decomposition.FastICA(n_components=self.params.get('nb_labels'),
-                                                max_iter=self.params.get('max_iter'), whiten=True)
+        self.estimator = FastICA(n_components=self.params.get('nb_labels'),
+                                 max_iter=self.params.get('max_iter'),
+                                 algorithm='deflation',
+                                 whiten=True)
         self.fit_result = self.estimator.fit_transform(imgs_vec)
         self.components = self.estimator.mixing_.T
@@ -159,8 +161,9 @@ class ExperimentSparsePCA(ExperimentLinearCombine):
     """
     def _estimate_linear_combination(self, imgs_vec):
-        self.estimator = decomposition.SparsePCA(n_components=self.params.get('nb_labels'),
-                                                 max_iter=self.params.get('max_iter'))  # , n_jobs=-1
+        self.estimator = SparsePCA(n_components=self.params.get('nb_labels'),
+                                   max_iter=self.params.get('max_iter'),
+                                   n_jobs=1)
         self.fit_result = self.estimator.fit_transform(imgs_vec)
         self.components = self.estimator.components_
@@ -171,10 +174,12 @@ class ExperimentDictLearn(ExperimentLinearCombine):
     """
     def _estimate_linear_combination(self, imgs_vec):
-        self.estimator = decomposition.DictionaryLearning(fit_algorithm='lars',
-                                                          transform_algorithm='omp', split_sign=False,
-                                                          n_components=self.params.get('nb_labels'),
-                                                          max_iter=self.params.get('max_iter'))
+        self.estimator = DictionaryLearning(fit_algorithm='lars',
+                                            transform_algorithm='omp',
+                                            split_sign=False,
+                                            n_components=self.params.get('nb_labels'),
+                                            max_iter=self.params.get('max_iter'),
+                                            n_jobs=1)
         self.fit_result = self.estimator.fit_transform(imgs_vec)
         self.components = self.estimator.components_
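For context, a minimal sketch of the three estimators as configured in this commit (parameter names per the scikit-learn API of that era, when `whiten` still accepted a bool; the data here is random filler):

```python
import numpy as np
from sklearn.decomposition import SparsePCA, FastICA, DictionaryLearning

X = np.random.rand(20, 100)  # 20 flattened sample images, 100 pixels each
ica = FastICA(n_components=5, max_iter=150, algorithm='deflation', whiten=True)
spca = SparsePCA(n_components=5, max_iter=150, n_jobs=1)
dl = DictionaryLearning(n_components=5, max_iter=150, fit_algorithm='lars',
                        transform_algorithm='omp', split_sign=False, n_jobs=1)

W = ica.fit_transform(X)     # mixing matrix lives in ica.mixing_.T
```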
@@ -183,7 +188,7 @@ METHODS = {
     'PCA': ExperimentSparsePCA,
     'ICA': ExperimentFastICA,
     'DL': ExperimentDictLearn,
-    'APDL': expt_apdl.ExperimentALPE_mp,
+    'APDL': expt_apdl.ExperimentAPDL,
 }
@@ -213,7 +218,7 @@ def experiments_synthetic(params=SYNTH_PARAMS):
     logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
                                             for k, v in arg_params.iteritems()]))
     params.update(arg_params)
-    params['max_iter'] = 9999
+    # params['max_iter'] = 9999
     l_params = [params]
     # l_params = expt_apd.extend_list_params(l_params, 'nb_samples',
     #                                        np.linspace(0.1, 1, 10).tolist())
@@ -19,6 +19,7 @@ import traceback
 import logging
 import copy
 import gc
+from functools import partial
 import matplotlib
 matplotlib.use('Agg')
@@ -102,7 +103,15 @@ def experiment_pipeline_alpe_showcase(path_out):
     return atlas, w_bins
-class ExperimentALPE(expt_apd.ExperimentAPD):
+DICT_ATLAS_INIT = {
+    'msc': ptn_dict.initialise_atlas_mosaic,
+    'msc1': partial(ptn_dict.initialise_atlas_mosaic, coef=1.5),
+    'msc2': partial(ptn_dict.initialise_atlas_mosaic, coef=2),
+    'rnd': ptn_dict.initialise_atlas_random,
+}
+
+
+class ExperimentALPE_raw(expt_apd.ExperimentAPD):
     """
     the main real experiment of our Atlas Learning Pattern Encoding
     """
@@ -115,10 +124,9 @@ class ExperimentALPE(expt_apd.ExperimentAPD):
         :return: np.array<w, h>
         """
         im_size = self.imgs[0].shape
-        if init_tp == 'msc':
-            init_atlas = ptn_dict.initialise_atlas_mosaic(im_size, nb_lbs)
-        else:
-            init_atlas = ptn_dict.initialise_atlas_random(im_size, nb_lbs)
+        assert init_tp in DICT_ATLAS_INIT
+        fn_init_atlas = DICT_ATLAS_INIT[init_tp]
+        init_atlas = fn_init_atlas(im_size, nb_lbs)
         return init_atlas
     def _estimate_atlas(self, v):
@@ -129,24 +137,24 @@ class ExperimentALPE(expt_apd.ExperimentAPD):
         :return: np.array, np.array
         """
         logging.info(' -> estimate atlas...')
-        p = self.params.copy()
-        p[self.iter_var_name] = v
-        logging.debug('PARAMS: %s', repr(p))
-        init_atlas = self._init_atlas(p['nb_labels'], p['init_tp'])
+        self.params[self.iter_var_name] = v
+        logging.debug('PARAMS: %s', repr(self.params))
+        init_atlas = self._init_atlas(self.params['nb_labels'], self.params['init_tp'])
         # prefix = 'expt_{}'.format(p['init_tp'])
-        path_out = os.path.join(p['path_exp'],
-                                'log_{}_{}'.format(self.iter_var_name, v))
-        if not os.path.exists(path_out):
-            os.mkdir(path_out)
+        path_out = os.path.join(self.params['path_exp'],
+                                'debug_{}_{}'.format(self.iter_var_name, v))
+        if isinstance(self.params['nb_samples'], float):
+            self.params['nb_samples'] = int(len(self.imgs) * self.params['nb_samples'])
         try:
             atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(
-                self.imgs[:self.params['nb_samples']],
-                init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
-                gc_coef=p['gc_regul'], max_iter=p['max_iter'],
-                ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
-                out_dir=path_out)  # , out_prefix=prefix
+                self.imgs[:self.params['nb_samples']],
+                init_atlas=init_atlas,
+                gc_reinit=self.params['gc_reinit'],
+                gc_coef=self.params['gc_regul'],
+                max_iter=self.params['max_iter'],
+                ptn_split=self.params['ptn_split'],
+                overlap_major=self.params['overlap_mj'],
+                out_dir=path_out)  # , out_prefix=prefix
         except:
             logging.error('FAILED, no atlas estimated!')
             logging.error(traceback.format_exc())
@@ -177,7 +185,7 @@ class ExperimentALPE(expt_apd.ExperimentAPD):
         return stat
-class ExperimentALPE_mp(ExperimentALPE, expt_apd.ExperimentAPD_mp):
+class ExperimentAPDL(ExperimentALPE_raw, expt_apd.ExperimentAPD_mp):
     """
     parallel version of ALPE
     """
@@ -191,15 +199,21 @@ def experiments_test(dict_params=SYNTH_PARAMS):
     params = copy.deepcopy(dict_params)
     params['nb_runs'] = 3
-    logging.info('RUN: ExperimentALPE')
-    expt = ExperimentALPE(params)
+    logging.info('RUN: ExperimentALPE_raw')
+    expt = ExperimentALPE_raw(params)
     expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
     logging.info('RUN: ExperimentALPE_mp')
-    expt_p = ExperimentALPE_mp(params)
+    expt_p = ExperimentAPDL(params)
     expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
+# INIT_TYPES = ['msc', 'rnd']
+# GRAPHCUT_REGUL = [0.0, 1e-3, 0.1]
+INIT_TYPES = DICT_ATLAS_INIT.keys()
+GRAPHCUT_REGUL = [0., 0e-12, 1e-9, 1e-6, 1e-3, 1e-1, 1.0]
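extend_list_params is internal to this repo; assuming it expands each params dict into one copy per swept value (i.e. a grid sweep over INIT_TYPES, gc_regul, and so on), its behaviour is roughly:

```python
import copy

def extend_list_params_sketch(list_params, name, values):
    # one deep-copied params dict per swept value
    return [dict(copy.deepcopy(p), **{name: v})
            for p in list_params for v in values]
```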
 def experiments_synthetic(params=SYNTH_PARAMS):
     """ run all experiments
@@ -213,15 +227,13 @@ def experiments_synthetic(params=SYNTH_PARAMS):
     arg_params = expt_apd.parse_params(params)
     logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
                                             for k, v in arg_params.iteritems()]))
-    nb_jobs = arg_params['nb_jobs']
     params.update(arg_params)
     l_params = [params]
     l_params = expt_apd.extend_list_params(l_params, 'dataset', SYNTH_SUB_DATASETS)
-    l_params = expt_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
+    l_params = expt_apd.extend_list_params(l_params, 'init_tp', INIT_TYPES)
     l_params = expt_apd.extend_list_params(l_params, 'ptn_split', [True, False])
-    l_params = expt_apd.extend_list_params(l_params, 'gc_regul',
-                                           [0., 1e-6, 1e-3, 1e-1, 1e0])
+    l_params = expt_apd.extend_list_params(l_params, 'gc_regul', GRAPHCUT_REGUL)
     ptn_range = SYNTH_PTN_RANGE[os.path.basename(params['path_in'])]
     l_params = expt_apd.extend_list_params(l_params, 'nb_labels', ptn_range)
@@ -230,10 +242,10 @@ def experiments_synthetic(params=SYNTH_PARAMS):
     tqdm_bar = tqdm.tqdm(total=len(l_params))
     for params in l_params:
         try:
-            if nb_jobs > 1:
-                expt = ExperimentALPE_mp(params, nb_jobs)
+            if params['nb_jobs'] > 1:
+                expt = ExperimentAPDL(params, params['nb_jobs'])
             else:
-                expt = ExperimentALPE(params)
+                expt = ExperimentALPE_raw(params)
             expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
             # exp.run(iter_var='nb_labels', iter_vals=ptn_range)
             del expt
@@ -251,7 +263,6 @@ def experiments_real(params=REAL_PARAMS):
     arg_params = expt_apd.parse_params(params)
     logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
                                             for k, v in arg_params.iteritems()]))
-    nb_jobs = arg_params['nb_jobs']
     params.update(arg_params)
     l_params = [copy.deepcopy(params)]
@@ -265,10 +276,10 @@ def experiments_real(params=REAL_PARAMS):
     tqdm_bar = tqdm.tqdm(total=len(l_params))
     for params in l_params:
-        if nb_jobs > 1:
-            expt = ExperimentALPE_mp(params, nb_jobs)
+        if params['nb_jobs'] > 1:
+            expt = ExperimentAPDL(params, params['nb_jobs'])
         else:
-            expt = ExperimentALPE(params)
+            expt = ExperimentALPE_raw(params)
         # exp.run(gt=False, iter_var='case', iter_values=range(params['nb_runs']))
         expt.run(gt=False, iter_var='nb_labels',
                  iter_vals=[9, 12, 15, 20, 25, 30])
@@ -277,7 +288,7 @@ def main():
 def main():
-    logging.basicConfig(level=logging.INFO)
+    logging.basicConfig(level=logging.DEBUG)
     logging.info('running...')
     # test_encoding(atlas, imgs, encoding)
@@ -285,7 +296,6 @@ def main():
     # experiments_test()
     # plt.show()
     arg_params = expt_apd.parse_params(SYNTH_PARAMS)
     if arg_params['type'] == 'synth':
         experiments_synthetic()