Commit 0a96320a authored by Jiri Borovec's avatar Jiri Borovec

experiments design

parent b7ad72cf
......@@ -225,7 +225,7 @@ def export_visualization_image(img, i, out_dir, prefix='debug', name='',
plt.imshow(img, interpolation='none', aspect=ration)
plt.xlabel(labels[0])
plt.ylabel(labels[1])
n_fig = '{}_ALPE_{}_iter_{:04d}.png'.format(prefix, name, i)
n_fig = 'APDL_{}_{}_iter_{:04d}.png'.format(prefix, name, i)
p_fig = os.path.join(out_dir, n_fig)
logger.debug('.. export Vusialization as "{}"'.format(p_fig))
fig.savefig(p_fig, bbox_inches='tight', pad_inches=0.05)
......@@ -390,8 +390,10 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
if step_diff <= thr_step_diff:
logger.info('>> exiting while the atlas diff {} is '
'smaller then {}'.format(step_diff, thr_step_diff))
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, atlas)
for img in imgs]
break
return atlas, w_bins
return atlas, np.array(w_bins)
# TODO: Matching Pursuit
......
......@@ -2,32 +2,41 @@ import os
import sys
import time
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
from sklearn import metrics
# import src.ownUtils.toolDataIO as tD
import generate_dataset as gen_data
import dictionary_learning as dl
import ptn_disctionary as ptn_dict
import ptn_weights as ptn_weigth
import src.ownUtils.toolDataIO as tl_data
import src.ownUtils.toolExperiment as tl_expt
import logging
logger = logging.getLogger(__name__)
DEFAULT_PATH_OUTPUT = os.path.join('..','..','output')
def experiment_pipeline_alpe(atlas, imgs, encoding, p_out=DEFAULT_PATH_OUTPUT):
    """Run one atlas-learning experiment seeded from the reference atlas.

    :param atlas: np.array<w, h> reference atlas used to derive initialisations
    :param imgs: [np.array<w, h>] binary input images
    :param encoding: per-image weights (not used inside this function)
    :param p_out: directory where the learning iterations are exported
    """
    # candidate initialisations derived from the reference atlas
    atlas_deform = ptn_dict.initialise_atlas_deform_original(atlas)
    # atlas_random = ptn_dict.initialise_atlas_random(atlas.shape, np.max(atlas))
    atlas_mosaic = ptn_dict.initialise_atlas_mosaic(atlas.shape, np.max(atlas))
    weights_random = ptn_weigth.initialise_weights_random(len(imgs), np.max(atlas))
    # NOTE(review): only the mosaic initialisation is passed on; the deformed
    # atlas and random weights are computed but never used here
    dl.alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=atlas_mosaic,
                                            max_iter=9, out_dir=p_out,
                                            out_prefix='defm_msc')
    return None
def test_simple_case():
# DEFAULT_PATH_OUTPUT = os.path.join('..','..','output')
# location of the synthetic drosophila dataset used as input
DEFAULT_PATH_DATA = '/datagrid/Medical/microscopy/drosophila/synthetic_data'
# location where experiment result folders are created
DEFAULT_PATH_OUTPUT = '/datagrid/temporary/Medical/experiments_APD'
DEFAULT_DATASET_NAME = 'atomicPatternDictionary_v0'
# default configuration for the batch experiment (see expt_bacth_pipeline_alpe)
DEFAULT_PARAMS = {
'name': 'NAME',
'in_path': DEFAULT_PATH_DATA,
'dataset': DEFAULT_DATASET_NAME,
'sub_dataset': gen_data.DEFAULT_DATASET,
'out_path': DEFAULT_PATH_OUTPUT,
'init_tp': 'rnd', # msc. rnd
'max_iter': 99,  # maximal number of learning iterations per run
'gc_regul': 0.,  # graph-cut regularisation coefficient
'nb_lbs': 7,  # number of atlas labels / patterns
'nb_runs': 25, # 500
'gc_reinit': True,  # re-initialise graph-cut between iterations
}
def test_simple_show_case():
# implement simple case just with 2 images and 2/3 classes in atlas
atlas = gen_data.get_simple_atlas()
# atlas2 = atlas.copy()
......@@ -58,21 +67,100 @@ def test_simple_case():
plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.title('cost lb #{}'.format(i)), plt.colorbar()
# logger.debug(uc)
return None
return
def main():
def experiment_pipeline_alpe_showcase(p_out=DEFAULT_PATH_OUTPUT):
# Showcase: learn atlas + binary weights on one synthetic dataset and
# export the iterations into p_out.
# NOTE(review): this span is diff residue — both the 'datasetBinary_deform'
# load and the default dataset load appear; only one is live in the real file.
atlas = gen_data.dataset_create_atlas()
# plt.imshow(atlas)
# imgs = gen_data.dataset_load_images()
imgs = gen_data.dataset_load_images('datasetBinary_deform')
# plt.imshow(imgs[0])
imgs = gen_data.dataset_load_images()
# imgs = gen_data.dataset_load_images('datasetBinary_defNoise')
# candidate initialisations; only the mosaic one is passed to the pipeline
init_atlas_org = ptn_dict.initialise_atlas_deform_original(atlas)
init_atlas_rnd = ptn_dict.initialise_atlas_random(atlas.shape, np.max(atlas))
init_atlas_msc = ptn_dict.initialise_atlas_mosaic(atlas.shape, np.max(atlas))
init_encode_rnd = ptn_weigth.initialise_weights_random(len(imgs), np.max(atlas))
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(imgs, out_prefix='mosaic',
init_atlas=init_atlas_msc, max_iter=9, out_dir=p_out)
return
def alpe_init_atlas(im_size, nb_lbs, init_tp):
    """Create an initial atlas of the requested type.

    :param im_size: (w, h) shape of the atlas
    :param nb_lbs: number of labels / patterns
    :param init_tp: 'msc' for a mosaic initialisation, anything else -> random
    :return: np.array<w, h> initial atlas
    """
    if init_tp == 'msc':
        return ptn_dict.initialise_atlas_mosaic(im_size, nb_lbs)
    return ptn_dict.initialise_atlas_random(im_size, nb_lbs)
def alpe_compute_stat(GT_atlas, GT_img_rct, atlas, w_bins):
    """Score an estimated atlas and weights against the ground truth.

    :param GT_atlas: np.array<w, h> ground-truth atlas
    :param GT_img_rct: reconstructions built from the ground-truth encoding
    :param atlas: np.array<w, h> estimated atlas
    :param w_bins: np.array<nb_imgs, nb_lbs> estimated binary weights
    :return: dict with the atlas Adjusted Rand Score and the summed
        absolute difference of the reconstructions
    """
    rand_score = metrics.adjusted_rand_score(GT_atlas.ravel(), atlas.ravel())
    img_reconstructed = ptn_dict.reconstruct_samples(atlas, w_bins)
    diff_total = np.sum(abs(np.asarray(GT_img_rct) - np.asarray(img_reconstructed)))
    # NOTE(review): 'recosntruct_diff' is misspelled but kept byte-for-byte —
    # the results tables already use this column name
    return {'atlas_ARS': rand_score, 'recosntruct_diff': diff_total}
def alpe_pipe_atlas_learning_ptn_weights(i, imgs, init_atlas, params):
    """Run a single learning case, exporting into its own 'case_XXXXX' folder.

    :param i: index of this run (used to name the case sub-folder)
    :param imgs: [np.array<w, h>] input binary images
    :param init_atlas: np.array<w, h> initial atlas
    :param params: dict with 'init_tp', 'res_path', 'gc_reinit', 'gc_regul'
        and 'max_iter'
    :return: (atlas, w_bins) produced by the learning pipeline
    """
    out_prefix = 'expt_{}'.format(params['init_tp'])
    case_dir = os.path.join(params['res_path'], 'case_{:05d}'.format(i))
    if not os.path.exists(case_dir):
        os.mkdir(case_dir)
    return dl.alpe_pipe_atlas_learning_ptn_weights(
        imgs, init_atlas=init_atlas, gc_reinit=params['gc_reinit'],
        gc_coef=params['gc_regul'], max_iter=params['max_iter'],
        out_dir=case_dir, out_prefix=out_prefix)
def load_data_ground_truth(params):
    """Load the ground-truth atlas and build its reference reconstructions.

    :param params: dict with 'in_path' and 'dataset'
    :return: (GT_atlas, GT_img_rct) — the atlas and the images reconstructed
        from the ground-truth encoding
    """
    dataset_path = os.path.join(params['in_path'], params['dataset'])
    GT_atlas = gen_data.dataset_create_atlas(path_base=dataset_path)
    GT_encoding = gen_data.dataset_load_weights(path_base=dataset_path)
    # reference reconstructions used for the per-run diff statistic
    GT_img_rct = ptn_dict.reconstruct_samples(GT_atlas, GT_encoding)
    return GT_atlas, GT_img_rct
def expt_bacth_pipeline_alpe(GT_atlas, GT_img_rct, params):
    """Run the batch of atlas-learning experiments and collect statistics.

    Creates a fresh experiment folder, runs `params['nb_runs']` learning
    cases, scores each against the ground truth, checkpoints the partial
    results to CSV, and appends a summary to the results file.

    :param GT_atlas: np.array<w, h> ground-truth atlas
    :param GT_img_rct: reference reconstructions from the GT encoding
    :param params: experiment configuration dict (see DEFAULT_PARAMS)
    :return: pd.DataFrame with one row of statistics per run
    """
    tl_expt.check_exist_dirs_files(params)
    params['res_path'] = tl_data.create_experiment_folder(params['out_path'],
                                                          params['name'])
    p_res_stat = os.path.join(params['res_path'], tl_expt.DEFAULT_FILE_RESULTS)
    tl_expt.export_params(params, p_res_stat)
    p_data = os.path.join(params['in_path'], params['dataset'])
    imgs = gen_data.dataset_load_images(params['sub_dataset'], p_data)
    im_size = imgs[0].shape
    # accumulate rows in a list and build the frame once per checkpoint:
    # per-row DataFrame.append() is quadratic and was removed in pandas >= 2
    stat_rows = []
    df_stat = pd.DataFrame()
    for i in range(params['nb_runs']):
        init_atlas = alpe_init_atlas(im_size, params['nb_lbs'],
                                     params['init_tp'])
        atlas, w_bins = alpe_pipe_atlas_learning_ptn_weights(i, imgs,
                                                             init_atlas, params)
        stat_rows.append(alpe_compute_stat(GT_atlas, GT_img_rct,
                                           atlas, w_bins))
        df_stat = pd.DataFrame(stat_rows)
        # checkpoint partial results after every run so a crash loses nothing
        # (original indentation was lost; placement inside the loop assumed —
        # TODO confirm against the project history)
        df_stat.to_csv(os.path.join(params['res_path'], 'results.csv'))
    logger.debug('statistic: \n{}'.format(df_stat.describe()))
    # 'with' guarantees the file is closed even if describe() raises
    # (the original bare open()/close() leaked the handle on error)
    with open(p_res_stat, 'a') as f:
        f.write('\n' * 3 + 'RESULTS: \n' + '=' * 9)
        f.write('\n{}'.format(df_stat.describe()))
    return df_stat
# TODO: compare error to nb classes
# TODO: other datasets
def main():
# Entry point: run the batch ALPE experiment against the ground truth.
# NOTE(review): this span is diff residue — the 'encoding' load and the
# trailing experiment_pipeline_alpe(...) call belong to the removed side
# of the hunk ('atlas'/'imgs' are not defined on the added side).
# experiment_pipeline_alpe_showcase()
encoding = gen_data.dataset_load_weights()
# logger.info('encoding: {}'.format(encoding))
params = DEFAULT_PARAMS.copy()
GT_atlas, GT_img_rct = load_data_ground_truth(params)
expt_bacth_pipeline_alpe(GT_atlas, GT_img_rct, params)
experiment_pipeline_alpe(atlas, imgs, encoding)
return None
if __name__ == "__main__":
......@@ -82,7 +170,7 @@ if __name__ == "__main__":
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
# test_simple_case()
# test_simple_show_case()
main()
......
......@@ -61,16 +61,16 @@ def initialise_atlas_deform_original(atlas):
return np.array(res, dtype=np.int)
def reconstruct_samples(atlas, w_bins):
    """Create reconstructions of binary images from an atlas and weights.

    A zero column is prepended to the weights so that atlas label 0
    (background) always maps to value 0 in every reconstruction.

    :param atlas: np.array<w, h> label map (0 = background)
    :param w_bins: np.array<nb_imgs, nb_lbs> binary pattern weights
    :return: [np.array<w, h>] * nb_imgs
    """
    # NOTE(review): the scraped span contained both the pre-rename (w_bin)
    # and post-rename (w_bins) diff lines; consolidated to the new version.
    w_bin_ext = np.append(np.zeros((w_bins.shape[0], 1)), w_bins, axis=1)
    imgs = [None] * w_bins.shape[0]
    for i, w in enumerate(w_bin_ext):
        # per-pixel lookup: each atlas label indexes this image's weight row
        imgs[i] = np.asarray(w)[np.asarray(atlas)]
    return imgs
......@@ -163,8 +163,9 @@ def reinit_atlas_likely_patterns(imgs, w_bins, atlas, lb_max=None):
imgs_rc = reconstruct_samples(atlas, w_bins)
atlas = insert_new_pattern(imgs, imgs_rc, atlas, l)
logger.debug('w_bins before: {}'.format(np.sum(w_bins[:, l_w])))
lim_repopulate = 100. / np.prod(atlas.shape)
w_bins[:, l_w] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
atlas, l, 1e-3)
atlas, l, lim_repopulate)
logger.debug('w_bins after: {}'.format(np.sum(w_bins[:, l_w])))
return atlas, w_bins
......
import numpy as np
from sklearn import metrics
import logging
from sklearn import metrics
logger = logging.getLogger(__name__)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment