Commit 781b7187 authored by Jiri Borovec

update

parent d011027e
@@ -14,7 +14,7 @@ all data are stored on standard paths within the university datagrid
## Synthetic datasets
all experiment ar located in **experiments_alpe.py**
all experiments are located in **experiments_alpe.py**
1. simple check whether the unary cost is computed correctly on 3 simple images
2. performing the experiment on the clean synthetic dataset
@@ -71,8 +71,8 @@ def compute_relative_penaly_images_weights(l_imgs, weights):
weights_ext = np.append(np.zeros((weights.shape[0], 1)), weights, axis=1)
# logger.debug(weights_ext)
imgs = np.array(l_imgs)
logger.debug('DIMS pott: {}, l_imgs {}, w_bin: {}'.format(pott_sum.shape,
imgs.shape, weights_ext.shape))
logger.debug('DIMS potts: %s, l_imgs %s, w_bin: %s', repr(pott_sum.shape),
repr(imgs.shape), repr(weights_ext.shape))
logger.debug('... walk over all pixels in each image')
for i in range(pott_sum.shape[0]):
for j in range(pott_sum.shape[1]):
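Most of the logging changes in this commit replace eager `str.format` calls with lazy `%`-style logging. A minimal sketch of the difference (the `repr(...)` arguments are still evaluated either way; what is deferred is the string interpolation, which only runs if a handler actually accepts the record):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

shape = (256, 256, 4)
# lazy: the template and args are stored on the log record; '%' formatting
# happens only when the DEBUG record is emitted (here it is simply dropped)
logger.debug('DIMS potts: %s', repr(shape))
# eager: the full string is built before logger.debug even sees it
logger.debug('DIMS potts: {}'.format(shape))
```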
@@ -146,19 +146,17 @@ def estimate_atlas_graphcut_simple(imgs, encodding, coef=1.):
labeling_sum = compute_positive_cost_images_weights(imgs, encodding)
unary_cost = np.array(-1 * labeling_sum , dtype=np.int32)
logger.debug('graph unaries potentials {}: \n{}'.format(unary_cost.shape,
np.histogram(unary_cost, bins=10)))
logger.debug('graph unaries potentials %s: \n %s', repr(unary_cost.shape),
repr(np.histogram(unary_cost, bins=10)))
# original and the right way..
pairwise = (1 - np.eye(labeling_sum.shape[-1])) * coef
pairwise_cost = np.array(pairwise , dtype=np.int32)
logger.debug('graph pairwise coefs {}'.format(pairwise_cost.shape))
logger.debug('graph pairwise coefs %s', repr(pairwise_cost.shape))
# run GraphCut
labels = cut_grid_graph_simple(unary_cost, pairwise_cost, algorithm='expansion')
# reshape labels
labels = labels.reshape(labeling_sum.shape[:2])
logger.debug('resulting labelling {}: \n{}'.format(labels.shape, labels))
logger.debug('resulting labelling %s: \n %s', repr(labels.shape), repr(labels))
return labels
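The pairwise term built above is a plain Potts model: zero cost for keeping the same label, a constant `coef` for any label change. A tiny illustration with 3 labels:

```python
import numpy as np

coef = 2
nb_lbs = 3
pairwise = (1 - np.eye(nb_lbs)) * coef
print(pairwise.astype(np.int32))
# [[0 2 2]
#  [2 0 2]
#  [2 2 0]]
```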
@@ -181,31 +179,31 @@ def estimate_atlas_graphcut_general(imgs, encoding, coef=1., init_lbs=None):
# u_cost = 1. / (labelingSum +1)
unary_cost = np.array(u_cost , dtype=np.float64)
unary_cost = unary_cost.reshape(-1, u_cost.shape[-1])
logger.debug('graph unaries potentials {}: \n{}'.format(unary_cost.shape,
np.histogram(unary_cost, bins=10)))
logger.debug('graph unaries potentials %s: \n %s', repr(unary_cost.shape),
repr(np.histogram(unary_cost, bins=10)))
edges = edges_in_image_plane(u_cost.shape[:2])
logger.debug('edges for image plane are shape {}'.format(edges.shape))
logger.debug('edges for image plane are shape %s', repr(edges.shape))
edge_weights = np.ones(edges.shape[0])
logger.debug('edges weights are shape {}'.format(edge_weights.shape))
logger.debug('edges weights are shape %s', repr(edge_weights.shape))
# original and the right way...
pairwise = (1 - np.eye(u_cost.shape[-1])) * coef
pairwise_cost = np.array(pairwise , dtype=np.float64)
logger.debug('graph pairwise coefs {}'.format(pairwise_cost.shape))
logger.debug('graph pairwise coefs %s', repr(pairwise_cost.shape))
if init_lbs is None:
init_lbs = np.argmin(unary_cost, axis=1)
else:
init_lbs = init_lbs.ravel()
logger.debug('graph initial labels {}'.format(init_lbs.shape))
logger.debug('graph initial labels %s', repr(init_lbs.shape))
# run GraphCut
labels = cut_general_graph(edges, edge_weights, unary_cost, pairwise_cost,
algorithm='expansion', init_labels=init_lbs)
# reshape labels
labels = labels.reshape(u_cost.shape[:2])
logger.debug('resulting labelling {}'.format(labels.shape))
logger.debug('resulting labelling %s', repr(labels.shape))
return labels
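`edges_in_image_plane` itself is not shown in this diff; a rough stand-in (hypothetical, assuming row-major pixel indexing and 4-connectivity) would look like:

```python
import numpy as np

def grid_edges(im_size):
    # hypothetical sketch of edges_in_image_plane: index pixels row-major and
    # connect each pixel to its right and bottom neighbour (4-connectivity)
    idxs = np.arange(np.prod(im_size)).reshape(im_size)
    horiz = np.vstack([idxs[:, :-1].ravel(), idxs[:, 1:].ravel()]).T
    vert = np.vstack([idxs[:-1, :].ravel(), idxs[1:, :].ravel()]).T
    return np.vstack([horiz, vert])

print(grid_edges((2, 3)).shape)  # (7, 2): 4 horizontal + 3 vertical edges
```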
@@ -228,13 +226,13 @@ def export_visualization_image(img, i, out_dir, prefix='debug', name='',
plt.ylabel(labels[1])
n_fig = 'APDL_{}_{}_iter_{:04d}'.format(prefix, name, i)
p_fig = os.path.join(out_dir, n_fig + '.png')
logger.debug('.. export Vusialization as "{}...{}"'.format(p_fig[:19], p_fig[-19:]))
logger.debug('.. export Visualization as "%s...%s"', p_fig[:19], p_fig[-19:])
fig.savefig(p_fig, bbox_inches='tight', pad_inches=0.05)
plt.close()
def export_visual_atlas(i, out_dir, atlas=None, weights=None, prefix='debug'):
""" export the atlas and/or weights to output directory
""" export the atlas and/or weights to results directory
:param i: int, iteration to be shown in the img name
:param out_dir: str, path to the resulting folder
@@ -245,7 +243,7 @@ def export_visual_atlas(i, out_dir, atlas=None, weights=None, prefix='debug'):
"""
if logger.getEffectiveLevel()==logging.DEBUG:
if not os.path.exists(out_dir):
logger.debug('output path "{}" does not exist'.format(out_dir))
logger.debug('results path "%s" does not exist', out_dir)
return None
if atlas is not None:
# export_visualization_image(atlas, i, out_dir, prefix, 'atlas',
@@ -265,7 +263,7 @@ def alpe_initialisation(imgs, init_atlas, init_weights, out_dir, out_prefix):
:param init_atlas: np.array<w, h>
:param init_weights: np.array<nb_imgs, nb_lbs>
:param out_prefix: str
:param out_dir: str, path to the output directory
:param out_dir: str, path to the results directory
:return: np.array<w, h>, np.array<nb_imgs, nb_lbs>
"""
if init_weights is not None and init_atlas is None:
@@ -282,8 +280,8 @@ def alpe_initialisation(imgs, init_atlas, init_weights, out_dir, out_prefix):
atlas = init_atlas
w_bins = init_weights
if len(np.unique(atlas)) == 1:
logger.info('ERROR: the atlas does not contain '
'any label... {}'.format(np.unique(atlas)))
logger.warning('the atlas does not contain any label... %s',
repr(np.unique(atlas)))
export_visual_atlas(0, out_dir, atlas, w_bins, prefix=out_prefix)
return atlas, w_bins
@@ -335,7 +333,7 @@ def alpe_update_atlas(imgs, atlas, w_bins, lb_max, gc_coef, gc_reinit, ptn_split
:return: np.array<w, h>, float
"""
if np.sum(w_bins) == 0:
logger.info('ERROR: the w_bins is empty... {}'.format(np.unique(atlas)))
logger.warning('the w_bins is empty... %s', repr(np.unique(atlas)))
w_bins = np.array(w_bins)
# update atlas
logger.debug('... perform Atlas estimation')
@@ -355,7 +353,7 @@ def alpe_update_atlas(imgs, atlas, w_bins, lb_max, gc_coef, gc_reinit, ptn_split
def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=None,
gc_coef=0.0, thr_step_diff=0.0, max_iter=99, gc_reinit=True,
ptn_split=True, w_ovp_m=False, out_prefix='debug', out_dir=''):
""" the experiments_synthetic_s_run pipeline for block coordinate descent algo with graphcut
""" the experiments_synthetic_single_run pipeline for block coordinate descent algo with graphcut
:param imgs: [np.array<w, h>]
:param init_atlas: np.array<w, h>
@@ -366,7 +364,7 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
:param max_iter: int, max number of iterations
:param gc_reinit: bool, whether to use the atlas from the previous step as init for the actual step
:param out_prefix: str
:param out_dir: str, path to the output directory
:param out_dir: str, path to the results directory
:return: np.array<w, h>, np.array<nb_imgs, nb_lbs>
"""
logger.info('compute an Atlas and estimate w_bins for all images')
@@ -379,9 +377,8 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
for i in range(max_iter):
if len(np.unique(atlas)) == 1:
logger.info('ERROR: the atlas does not contain '
'any label... {}'.format(np.unique(atlas)))
logger.warning('the atlas does not contain any label... %s',
repr(np.unique(atlas)))
w_bins = alpe_update_weights(imgs, atlas, w_ovp_m)
# plt.subplot(221), plt.imshow(atlas, interpolation='nearest')
# plt.subplot(222), plt.imshow(w_bins, aspect='auto')
@@ -392,30 +389,14 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
atlas, step_diff = alpe_update_atlas(imgs, atlas, w_bins, lb_max,
gc_coef, gc_reinit, ptn_split)
logger.info('-> iter. #{} with Atlas diff {}'.format(i + 1, step_diff))
logger.info('-> iter. #%i with Atlas diff %f', (i + 1), step_diff)
export_visual_atlas(i + 1, out_dir, atlas, w_bins, prefix=out_prefix)
# stopping criterion
if step_diff <= thr_step_diff:
logger.info('>> exiting while the atlas diff {} is '
'smaller then {}'.format(step_diff, thr_step_diff))
logger.info('>> exiting since the atlas diff %f is smaller than %f',
step_diff, thr_step_diff)
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, atlas)
for img in imgs]
break
return atlas, np.array(w_bins)
# TODO: Matching Pursuit
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuit.html
# It seems it should be fairly easy to program a Matching Pursuit variant where each
# iteration maximises the scalar product itself rather than its absolute value,
# which gets rid of the negative values. If we strictly wanted coefficients in the
# interval [0, 1], we could check, for every pattern, what the approximation error
# would be under that constraint; but that seems unnecessary for our case.
# Other: http://winsty.net/onndl.html
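A minimal sketch of the variant described in the TODO above: greedy matching pursuit that selects atoms by the signed (not absolute) projection, so coefficients stay non-negative. This is illustrative only and not part of the repository:

```python
import numpy as np

def matching_pursuit_nonneg(signal, dictionary, nb_iter=10):
    # dictionary: one atom (pattern) per row; signal: flat vector
    residual = np.array(signal, dtype=float)
    coefs = np.zeros(dictionary.shape[0])
    norms = np.sum(dictionary ** 2., axis=1)
    for _ in range(nb_iter):
        proj = dictionary.dot(residual) / norms
        best = np.argmax(proj)          # signed projection, not np.abs(proj)
        if proj[best] <= 0:             # no atom improves the fit any more
            break
        coefs[best] += proj[best]
        residual -= proj[best] * dictionary[best]
    return coefs
```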
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
@@ -14,8 +14,7 @@ import os
import time
import traceback
import logging
import copy_reg
import types
import numpy as np
import matplotlib.pylab as plt
@@ -26,8 +25,12 @@ import dictionary_learning as dl
import ptn_disctionary as ptn_dict
import ptn_weights as ptn_weigth
import expt_apd_sta as exp_sta
logger = logging.getLogger(__name__)
NB_THREADS = exp_sta.NB_THREADS
PATH_OUTPUT = exp_sta.PATH_OUTPUT
# REQUIRED FOR MPROC POOL
# ISSUE: cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
# http://stackoverflow.com/questions/25156768/cant-pickle-type-instancemethod-using-pythons-multiprocessing-pool-apply-a
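The `copy_reg`/`types` imports removed above belonged to the standard Python 2 workaround from the linked Stack Overflow thread: register a reducer so bound methods become picklable and `mproc.Pool.map` can call them. A sketch of what that registration looks like (presumably relocated elsewhere in the repo):

```python
import copy_reg
import types

def _reduce_method(m):
    # rebuild the bound method on unpickling via getattr(instance, name);
    # works for bound methods, where im_self is the instance
    return getattr, (m.im_self, m.im_func.func_name)

copy_reg.pickle(types.MethodType, _reduce_method)
```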
@@ -42,9 +45,7 @@ logger = logging.getLogger(__name__)
def test_simple_show_case():
"""
:return:
"""
""" """
# implement simple case just with 2 images and 2/3 classes in atlas
atlas = gen_data.get_simple_atlas()
# atlas2 = atlas.copy()
@@ -64,7 +65,7 @@ def test_simple_show_case():
plt.imshow(img, cmap='gray', interpolation='nearest')
t = time.time()
uc = dl.compute_relative_penaly_images_weights(imgs, np.array(ws))
logger.debug('elapsed TIME: {}'.format(time.time() - t))
logger.debug('elapsed TIME: %s', repr(time.time() - t))
res = dl.estimate_atlas_graphcut_general(imgs, np.array(ws), 0.)
plt.subplot(gs[0, -1]), plt.title('result')
plt.imshow(res, cmap=cm, interpolation='nearest'), plt.colorbar()
@@ -75,13 +76,12 @@ def test_simple_show_case():
plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.title('cost lb #{}'.format(i)), plt.colorbar()
# logger.debug(uc)
return
def experiment_pipeline_alpe_showcase(p_out=exp_sta.DEFAULT_PATH_OUTPUT):
def experiment_pipeline_alpe_showcase(path_out=PATH_OUTPUT):
""" an simple show case to prove that the particular steps are computed
:param p_out: str
:param path_out: str
:return:
"""
atlas = gen_data.dataset_create_atlas(path_base=exp_sta.SYNTH_PATH_APD)
@@ -97,55 +97,10 @@ def experiment_pipeline_alpe_showcase(p_out=exp_sta.DEFAULT_PATH_OUTPUT):
init_encode_rnd = ptn_weigth.initialise_weights_random(len(imgs), np.max(atlas))
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(imgs, out_prefix='mosaic',
init_atlas=init_atlas_msc, max_iter=9, out_dir=p_out)
init_atlas=init_atlas_msc, max_iter=9, out_dir=path_out)
return atlas, w_bins
# class ALPE(object):
#
# def __init__(self, params):
# self._params = copy.deepcopy(params)
#
# def _init_atlas(self, imgs):
# """ init atlas according an param
#
# :return: np.array<w, h>
# """
# im_size = imgs[0].shape
# nb_lbs = self._params['nb_lbs']
# if self._params['init_tp'] == 'msc':
# init_atlas = ptn_dict.initialise_atlas_mosaic(im_size, nb_lbs)
# else:
# init_atlas = ptn_dict.initialise_atlas_random(im_size, nb_lbs)
# return init_atlas
#
# def estimate_atlas(self, i, imgs):
# """ set all params and run the atlas estimation in try mode
#
# :param i: int, index of try
# :param init_atlas: np.array<w, h>
# :return:
# """
# init_atlas = self._init_atlas(imgs)
# p = self._params
# # prefix = 'expt_{}'.format(p['init_tp'])
# p_out = os.path.join(p['exp_path'], 'case_{:05d}'.format(i))
# if not os.path.exists(p_out):
# os.mkdir(p_out)
# try:
# atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(imgs,
# init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
# gc_coef=p['gc_regul'], max_iter=p['max_iter'],
# ptn_split=p['ptn_split'], w_ovp_m=p['overlap_mj'],
# out_dir=p_out) # , out_prefix=prefix
# except:
# logger.error('NO atlas estimated!')
# logger.error(traceback.format_exc())
# atlas = np.zeros_like(imgs[0])
# w_bins = np.zeros((len(imgs), 1))
# return atlas, w_bins
class ExperimentALPE(exp_sta.ExperimentAPD):
"""
the main experiment or our Atlas Learning Pattern Encoding
@@ -209,82 +164,96 @@ class ExperimentALPE(exp_sta.ExperimentAPD):
class ExperimentALPE_mp(ExperimentALPE, exp_sta.ExperimentAPD_mp):
"""
parrallel version of ALPE
parallel version of ALPE
"""
pass
def experiments_test():
""" simple test of the experiments
:return:
"""
""" simple test of the experiments """
# experiment_pipeline_alpe_showcase()
params = exp_sta.SYNTH_PARAMS.copy()
params['nb_runs'] = 3
logger.info('RUN: ExperimentALPE')
expt = ExperimentALPE(params)
expt.run(it_var='case', it_vals=range(params['nb_runs']))
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
logger.info('RUN: ExperimentALPE_mp')
expt_p = ExperimentALPE_mp(params)
expt_p.run(it_var='case', it_vals=range(params['nb_runs']))
return
expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_synthetic(dataset=None, nb_jobs=exp_sta.DEFAULT_NB_THREADS):
def experiments_synthetic(dataset=None, nb_jobs=NB_THREADS):
""" run all experiments
:return:
:param dataset: str
:param nb_jobs: int
"""
logging.basicConfig(level=logging.INFO)
params = exp_sta.SYNTH_PARAMS.copy()
if isinstance(dataset, str):
params.update({'dataset': dataset})
l_params = [params]
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset', exp_sta.SYNTH_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset',
exp_sta.SYNTH_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = exp_sta.extend_l_params(l_params, 'ptn_split', [True, False])
range_nb_lbs = exp_sta.SYNTH_PTN_RANGE[exp_sta.SYNTH_DATASET_VERSION]
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs', range_nb_lbs)
l_params = exp_sta.extend_l_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
logger.debug('list params: {}'.format(len(l_params)))
logger.debug('list params: %i', len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, exp_sta.DEFAULT_NB_THREADS)
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(it_var='case', it_vals=range(params['nb_runs']))
return
exp.run(iter_var='case', iter_vals=range(params['nb_runs']))
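`exp_sta.extend_l_params` is not shown in this diff; judging from its use here it cross-products the existing list of parameter dicts with each new value. A guessed reconstruction, purely for illustration:

```python
def extend_l_params(l_params, name, values):
    # hypothetical sketch: emit a copy of every existing dict per new value
    return [dict(params, **{name: val})
            for params in l_params for val in values]

print(extend_l_params([{'init_tp': 'msc'}], 'gc_regul', [0., 1e-3]))
# [{'init_tp': 'msc', 'gc_regul': 0.0}, {'init_tp': 'msc', 'gc_regul': 0.001}]
```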
def experiments_real(dataset=None, nb_jobs=exp_sta.DEFAULT_NB_THREADS):
def experiments_real(dataset=None, nb_jobs=NB_THREADS):
""" run all experiments
:return:
:param dataset: str
:param nb_jobs: int
"""
logging.basicConfig(level=logging.INFO)
params = exp_sta.REAL_PARAMS.copy()
if type(dataset) is str:
if isinstance(dataset, str):
params.update({'dataset': dataset})
l_params = [params]
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset', exp_sta.REAL_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset',
exp_sta.REAL_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = exp_sta.extend_l_params(l_params, 'ptn_split', [True, False])
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs', range(5, 12, 2) + range(15, 35, 4))
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs',
range(5, 12, 3) + range(12, 35, 5))
l_params = exp_sta.extend_l_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
logger.debug('list params: {}'.format(len(l_params)))
logger.debug('list params: %i', len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, exp_sta.DEFAULT_NB_THREADS)
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(gt=False, it_var='case', it_vals=range(params['nb_runs']))
return
exp.run(gt=False, iter_var='case', iter_vals=range(params['nb_runs']))
def main():
# experiments_synthetic()
# experiments_real(nb_jobs=1)
datasets = ['type_{}_segm_reg_binary'.format(i) for i in range(1, 5)]
for name in datasets:
experiments_real(name, nb_jobs=1)
if __name__ == "__main__":
@@ -293,12 +262,9 @@ if __name__ == "__main__":
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
# experiments_test()
experiments_synthetic()
experiments_test()
# experiments_real(nb_jobs=1)
# experiments_real('1000_imgs_binary')
# main()
logger.info('DONE')
plt.show()
\ No newline at end of file
@@ -15,7 +15,7 @@ def initialise_atlas_random(im_size, max_lb):
:param max_lb: int, number of labels
:return: np.array<w, h>
"""
logger.debug('initialise atlas {} as random labeling'.format(im_size))
logger.debug('initialise atlas %s as random labeling', repr(im_size))
nb_lbs = max_lb + 1
im = np.random.randint(1, nb_lbs, im_size)
return np.array(im, dtype=np.int)
@@ -29,10 +29,10 @@ def initialise_atlas_mosaic(im_size, max_lb):
:param max_lb: int, number of labels
:return: np.array<w, h>
"""
logger.debug('initialise atlas {} as grid labeling'.format(im_size))
logger.debug('initialise atlas %s as grid labeling', repr(im_size))
nb_lbs = max_lb
block = np.ones(np.ceil(im_size / np.array(nb_lbs, dtype=np.float)))
logger.debug('block size is {}'.format(block.shape))
logger.debug('block size is %s', repr(block.shape))
for l in range(nb_lbs):
idx = np.random.permutation(range(1, nb_lbs + 1))
for k in range(nb_lbs):
@@ -43,8 +43,8 @@ def initialise_atlas_mosaic(im_size, max_lb):
row = np.hstack((row, b))
if l == 0: mosaic = row
else: mosaic = np.vstack((mosaic, row))
logger.debug('generated mosaic {} with labeling'.format(mosaic.shape,
np.unique(mosaic).tolist()))
logger.debug('generated mosaic %s with labeling %s',
repr(mosaic.shape), repr(np.unique(mosaic).tolist()))
im = mosaic[:im_size[0], :im_size[1]]
return np.array(im, dtype=np.int)
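The row-stacking loops above tile random label blocks into a mosaic; an equivalent and more compact construction (a sketch, not the repo's code) uses `np.kron`:

```python
import numpy as np

def initialise_atlas_mosaic_kron(im_size, nb_lbs):
    # one random permutation of labels 1..nb_lbs per block row
    grid = np.array([np.random.permutation(range(1, nb_lbs + 1))
                     for _ in range(nb_lbs)])
    b_size = np.ceil(np.array(im_size) / float(nb_lbs)).astype(int)
    # kron blows each grid cell up to a constant block of its label
    mosaic = np.kron(grid, np.ones(b_size, dtype=int))
    return mosaic[:im_size[0], :im_size[1]]
```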
@@ -130,7 +130,7 @@ def insert_new_pattern(imgs, imgs_rc, atlas, lb):
# logger.debug('new im_ptn: {}'.format(np.sum(im_ptn) / np.prod(im_ptn.shape)))
# plt.imshow(im_ptn), plt.title('im_ptn'), plt.show()
atlas[im_ptn == True] = lb
logger.debug('area of new pattern is {}'.format(np.sum(atlas == lb)))
logger.debug('area of new pattern is %i', np.sum(atlas == lb))
return atlas
@@ -146,27 +146,27 @@ def reinit_atlas_likely_patterns(imgs, w_bins, atlas, lb_max=None):
if lb_max is None:
lb_max = max(np.max(atlas), w_bins.shape[1])
else:
logger.debug('compare w_bin {} to max {}'.format(w_bins.shape, lb_max))
logger.debug('compare w_bin %s to max %i', repr(w_bins.shape), lb_max)
for i in range(w_bins.shape[1], lb_max):
logger.debug('adding disappeared weigh column {}'.format(i))
logger.debug('adding disappeared weight column %i', i)
w_bins = np.append(w_bins, np.zeros((w_bins.shape[0], 1)), axis=1)
w_bin_ext = np.append(np.zeros((w_bins.shape[0], 1)), w_bins, axis=1)
logger.debug('IN > sum over weights: {}'.format(np.sum(w_bin_ext, axis=0)))
logger.debug('IN > sum over weights: %s', repr(np.sum(w_bin_ext, axis=0)))
# add one since the indexes do not cover 0 (background)
logger.debug('total nb labels: {}'.format(lb_max))
for l in range(1, lb_max + 1):
l_w = l - 1
w_sum = np.sum(w_bins[:, l_w])
logger.debug('reinit lb: {} with weight sum {}'.format(l, w_sum))
logger.debug('total nb labels: %i', lb_max)
for lb in range(1, lb_max + 1):
lb_w = lb - 1
w_sum = np.sum(w_bins[:, lb_w])
logger.debug('reinit lb: %i with weight sum %i', lb, w_sum)
if w_sum > 0:
continue
imgs_rc = reconstruct_samples(atlas, w_bins)
atlas = insert_new_pattern(imgs, imgs_rc, atlas, l)
logger.debug('w_bins before: {}'.format(np.sum(w_bins[:, l_w])))
atlas = insert_new_pattern(imgs, imgs_rc, atlas, lb)
logger.debug('w_bins before: %i', np.sum(w_bins[:, lb_w]))
lim_repopulate = 100. / np.prod(atlas.shape)
w_bins[:, l_w] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
atlas, l, lim_repopulate)
logger.debug('w_bins after: {}'.format(np.sum(w_bins[:, l_w])))
w_bins[:, lb_w] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
atlas, lb, lim_repopulate)
logger.debug('w_bins after: %i', np.sum(w_bins[:, lb_w]))
return atlas, w_bins
@@ -178,31 +178,25 @@ def atlas_split_indep_ptn(atlas, lb_max):
:return:
"""
l_ptns = []
for l in np.unique(atlas):
labeled, nb_objects = ndimage.label(atlas == l)
logger.debug('for lb {} detected #{}'.format(l, nb_objects))
for lb in np.unique(atlas):
labeled, nb_objects = ndimage.label(atlas == lb)
logger.debug('for label %i detected #%i', lb, nb_objects)
ptn = [(labeled == j) for j in np.unique(labeled)]
# skip the largest one assuming to be background
l_ptns += sorted(ptn, key=lambda x: np.sum(x), reverse=True)[1:]
l_ptns = sorted(l_ptns, key=lambda x: np.sum(x), reverse=True)
logger.debug('list of all areas {}'.format([np.sum(p) for p in l_ptns]))
logger.debug('list of all areas %s', repr([np.sum(p) for p in l_ptns]))
atlas_new = np.zeros(atlas.shape, dtype=np.int)
# take just lb_max largest elements
for i, ptn in enumerate(l_ptns[:lb_max]):
l = i + 1
logger.debug('pattern #{} area {}'.format(l, np.sum(ptn)))
lb = i + 1
logger.debug('pattern #%i area %i', lb, np.sum(ptn))
# plt.subplot(1,lb_max,l), plt.imshow(ptn), plt.colorbar()
atlas_new[ptn] = l
atlas_new[ptn] = lb
# plt.figure()
# plt.subplot(121), plt.imshow(atlas), plt.colorbar()
# plt.subplot(122), plt.imshow(atlas_new), plt.colorbar()
# plt.show()
logger.debug('atlas unique {}'.format(np.unique(atlas_new)))
logger.debug('atlas unique %s', repr(np.unique(atlas_new)))
return atlas_new
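For reference, `ndimage.label` marks each connected component of a boolean mask with its own integer, which is what the split above relies on:

```python
import numpy as np
from scipy import ndimage

mask = np.array([[1, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 1]], dtype=bool)
labeled, nb_objects = ndimage.label(mask)
# labeled -> [[1 1 0 0]
#             [0 0 0 2]
#             [0 0 2 2]]   nb_objects == 2
```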
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
@@ -13,8 +13,8 @@ def initialise_weights_random(nb_imgs, nb_lbs, ratio_sel=0.2):
1 means all and 0 means none
:return: np.array<nb_imgs, nb_lbs>
"""
logger.debug('initialise weights for {} images and {} labels '
'as random selection'.format(nb_imgs, nb_lbs))
logger.debug('initialise weights for %i images and %i labels '
'as random selection', nb_imgs, nb_lbs)
prob = np.random.random((nb_imgs, nb_lbs))
weights = np.zeros_like(prob)
weights[prob <= ratio_sel] = 1
@@ -27,8 +27,8 @@ def convert_weights_binary2indexes(weights):
:param weights: np.array<nb_imgs, nb_lbs>
:return: [[int, ...]] * nb_imgs
"""
logger.debug('convert binary weights {} '
'to list of indexes with True'.format(weights.shape))
logger.debug('convert binary weights %s to list of indexes with True',
repr(weights.shape))
# if type(weights)==np.ndarray: weights = weights.tolist()
w_idx = [None] * weights.shape[0]
for i in range(weights.shape[0]):
@@ -108,8 +108,3 @@ def weights_label_atlas_overlap_threshold(imgs, atlas, lb, thr=1e-3):
weight[i] = 1
return np.array(weight)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
@@ -8,31 +8,39 @@ and then decide whether the segmentation is likely to be correct or not
import os
import glob
import logging
import multiprocessing as mproc
import itertools
import shutil
import multiprocessing as mproc
# to suppress all visualisations; must be set before pyplot is imported
import matplotlib
matplotlib.use('Agg')
import numpy as np
from skimage import io
from skimage import io, morphology
import matplotlib.pyplot as plt
import pandas as pd
logger = logging.getLogger(__name__)
PATH_BASE = '/datagrid/Medical/microscopy/drosophila/'
# PATH_SEGM = os.path.join(PATH_BASE, 'TEMPORARY/orig_segm')
PATH_SEGM = os.path.join(PATH_BASE, 'real_segmentations/orig_segm')
PATH_SEGM = os.path.join(PATH_BASE, 'RESULTS/orig_segm')
PATH_VISU = os.path.join(PATH_BASE, 'TEMPORARY/orig_visu')
# PATH_SEGM = os.path.join(PATH_BASE, 'real_segmentations/stage_4_segm')
# PATH_VISU = os.path.join(PATH_BASE, 'real_segmentations/stage_4_visu')
NB_JOBS = mproc.cpu_count()
CSV_SEGM_GOOD = 'segm_good.csv'
CSV_SEGM_FAIL = 'segm_fail.csv'
FIG_STAT = 'stat_segm_labels.jpeg'
PREFIX_VISU_SEGM = 'visu_segm_'
# size (in px) of the margin taken around image borders
NB_IMG_CORNER = 50
# ratio of background required around the borders
THR_CORNER_BG = 0.95
# ratio for the total amount of background
THT_BACKGROUND = 0.95
# ratio of background within the object convex hull
THT_CONVEX = 0.85
def labels_ration(p_seg):
@@ -51,9 +59,13 @@ def labels_ration(p_seg):
seg[:, :NB_IMG_CORNER].ravel(),
seg[:, -NB_IMG_CORNER:].ravel()))
r_bg = np.sum(seg_border == 0) / float(seg_border.shape[0])
seg_fg = morphology.binary_closing(seg > 0, morphology.disk(30))
obj_convex = morphology.convex_hull_object(seg_fg)
obj_bg = np.sum(seg_fg[obj_convex] > 0) / float(np.sum(obj_convex))
return {'name': n_seg,
'lb_hist': d_lb_hist,
'r_bg': r_bg}
'r_bg': r_bg,
'r_cx': obj_bg}
def plot_histo_labels(d_hist, p_dir=''):
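The new `r_cx` statistic measures how much of each object's convex hull is actually foreground, so fragmented or deeply concave segmentations score low. A toy illustration of the underlying calls:

```python
import numpy as np
from skimage import morphology

# an L-shaped object leaves part of its own convex hull empty
seg_fg = np.zeros((40, 40), dtype=bool)
seg_fg[5:35, 5:20] = True
seg_fg[5:20, 5:35] = True
hull = morphology.convex_hull_object(seg_fg)
ratio = np.sum(seg_fg[hull]) / float(np.sum(hull))  # well below 1.0
```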
@@ -61,7 +73,7 @@ def plot_histo_labels(d_hist, p_dir=''):
:param d_hist: {int: float}
"""
logger.debug('plotting results')
logger.info('plotting stat. results')
fig = plt.figure()
for lb in d_hist:
plt.plot(d_hist[lb], '+', label=str(lb))
@@ -81,9 +93,8 @@ def read_make_hist(p_segs):
:return: [str, {int: float}] list of pairs with image name
and relative label histogram
"""
nb_jobs = mproc.cpu_count()
logger.debug('run in %i threads...', nb_jobs)
mproc_pool = mproc.Pool(nb_jobs)
logger.debug('run in %i threads...', NB_JOBS)
mproc_pool = mproc.Pool(NB_JOBS)
l_desc = mproc_pool.map(labels_ration, p_segs)
mproc_pool.close()
mproc_pool.join()
@@ -100,7 +111,7 @@ def merge_hist_stat(l_n_hist):
logger.debug('merge partial results...')
l_hist = [l['lb_hist'] for l in l_n_hist]
lbs = itertools.chain(*[h.keys() for h in l_hist])
u_lbs = np.unique(lbs).tolist()
u_lbs = np.unique(list(lbs)).tolist()
d_hist = {lb: [h[lb] for h in l_hist if lb in h]
for lb in u_lbs}
# join the foregrounds
@@ -109,6 +120,9 @@ def merge_hist_stat(l_n_hist):
d_hist['fg'].append(hist.get(1, 0) + hist.get(2, 0))
logger.debug('compute statistic...')
for lb, vals in d_hist.iteritems():
if len(vals) == 0:
logger.warning('label %s has no values to compute', str(lb))
continue
logger.info('label %s with mean %f, median %f, std %f',
str(lb), np.mean(vals), np.median(vals), np.std(vals))
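The `np.unique(list(lbs))` fix above matters because `itertools.chain` returns an iterator, and `np.unique` needs a materialised sequence; on the raw iterator it would see a single 0-d object rather than the label values. In short:

```python
import itertools
import numpy as np

l_hist = [{0: 0.9, 1: 0.1}, {0: 0.8, 2: 0.2}]
lbs = itertools.chain(*[h.keys() for h in l_hist])
# materialise the iterator before handing it to numpy
u_lbs = np.unique(list(lbs)).tolist()  # -> [0, 1, 2]
```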