Commit 9c7d76df authored by Jiri Borovec's avatar Jiri Borovec

fix & update

parent 50dfb691
......@@ -437,7 +437,7 @@ def mproc_load_images(list_path_img):
return list_names_imgs
def dataset_load_images(path_base, name, im_pattern='*', im_posix=IMAGE_POSIX,
def dataset_load_images(path_dir, im_pattern='*', im_posix=IMAGE_POSIX,
nb_spls=None, nb_jobs=1):
""" load complete dataset or just a subset
......@@ -449,13 +449,13 @@ def dataset_load_images(path_base, name, im_pattern='*', im_posix=IMAGE_POSIX,
:param nb_jobs: int
:return: [np.array], [str]
"""
path_dir = os.path.join(path_base, name)
logger.info('loading folder (%s) <- "%s"', os.path.exists(path_dir), path_dir)
assert os.path.exists(path_dir)
path_search = os.path.join(path_dir, im_pattern + im_posix)
logger.debug('image search "%s"', path_search)
path_imgs = glob.glob(path_search)
logger.debug('number spls %i in dataset "%s"', len(path_imgs), name)
logger.debug('number spls %i in dataset "%s"', len(path_imgs),
os.path.basename(path_dir))
path_imgs = sorted(path_imgs)[:nb_spls]
if nb_jobs > 1:
......@@ -523,7 +523,7 @@ def dataset_create_atlas(path_base, name=DIR_NAME_DICTIONARY, img_temp_name='pat
:param img_temp_name: str
:return: np.array<w, h>
"""
imgs, _ = dataset_load_images(path_base, name, img_temp_name)
imgs, _ = dataset_load_images(os.path.join(path_base, name), img_temp_name)
assert len(imgs) > 0
atlas = np.zeros_like(imgs[0])
for i, im in enumerate(imgs):
......
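A quick migration note on the hunks above: dataset_load_images now takes a single directory path instead of the former (path_base, name) pair, so call sites join the two beforehand. A minimal sketch of the new call (the paths are illustrative):

    import os
    import dataset_utils as gen_data

    path_base = '/datagrid/Medical/microscopy/drosophila/synthetic_data'
    name = 'atomicPatternDictionary_v0'
    # before: imgs, names = gen_data.dataset_load_images(path_base, name)
    imgs, im_names = gen_data.dataset_load_images(os.path.join(path_base, name),
                                                  im_pattern='*')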
......@@ -275,20 +275,20 @@ def alpe_initialisation(imgs, init_atlas, init_weights, out_dir, out_prefix):
if init_weights is not None and init_atlas is None:
logger.debug('... initialise Atlas from w_bins')
init_atlas = estimate_atlas_graphcut_general(imgs, init_weights, 0.)
export_visual_atlas(0, out_dir, weights=init_weights, prefix=out_prefix)
export_visual_atlas(0, out_dir, init_atlas, out_prefix)
if init_atlas is None:
max_nb_lbs = int(np.sqrt(len(imgs)))
logger.debug('... initialise Atlas with mosaic')
# IDEA: find better way of initialisation
init_atlas = ptn_dict.initialise_atlas_mosaic(imgs[0].shape, max_nb_lbs)
export_visual_atlas(0, out_dir, atlas=init_atlas, prefix=out_prefix)
export_visual_atlas(0, out_dir, init_atlas, out_prefix)
atlas = init_atlas
w_bins = init_weights
if len(np.unique(atlas)) == 1:
logger.warning('the atlas does not contain any label... %s',
repr(np.unique(atlas)))
export_visual_atlas(0, out_dir, atlas, w_bins, prefix=out_prefix)
export_visual_atlas(0, out_dir, atlas, out_prefix)
return atlas, w_bins
......@@ -398,7 +398,7 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
gc_coef, gc_reinit, ptn_split)
logger.info('-> iter. #%i with Atlas diff %f', (i + 1), step_diff)
export_visual_atlas(i + 1, out_dir, atlas, w_bins, prefix=out_prefix)
export_visual_atlas(i + 1, out_dir, atlas, out_prefix)
# stopping criterion
if step_diff <= thr_step_diff:
......
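Note on the initialisation fallback above: when neither an initial atlas nor initial weights are given, the label count defaults to the square root of the number of images before the mosaic initialisation. A minimal sketch (initialise_atlas_mosaic is the repo helper used above; the image shape is illustrative):

    import numpy as np
    nb_imgs = 25
    max_nb_lbs = int(np.sqrt(nb_imgs))  # -> 5 labels
    # init_atlas = ptn_dict.initialise_atlas_mosaic((64, 64), max_nb_lbs)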
......@@ -58,19 +58,18 @@ NB_THREADS = int(mproc.cpu_count() * .9)
PATH_RESULTS = os.path.join(PATH_OUTPUT, 'experiments_APD_temp')
DEFAULT_PARAMS = {
'computer': os.uname(),
'nb_samples': None,
'init_tp': 'msc', # msc. rnd
'max_iter': 39,
'gc_regul': 1e-2,
'nb_lbs': 12,
'nb_labels': 12,
'nb_runs': 25, # 500
'gc_reinit': True,
'ptn_split': True,
'overlap_mj': False,
}
SYNTH_DATASET_VERSION = 'v0'
SYNTH_DATASET_TEMP = 'atomicPatternDictionary_'
SYNTH_DATASET_NAME = SYNTH_DATASET_TEMP + SYNTH_DATASET_VERSION
SYNTH_DATASET_NAME = 'atomicPatternDictionary_v0'
SYNTH_PATH_APD = os.path.join(PATH_DATA_SYNTH, SYNTH_DATASET_NAME)
SYNTH_SUB_DATASETS = ['datasetBinary_raw',
'datasetBinary_noise',
......@@ -78,14 +77,13 @@ SYNTH_SUB_DATASETS = ['datasetBinary_raw',
'datasetBinary_defNoise']
SYNTH_PARAMS = DEFAULT_PARAMS.copy()
SYNTH_PARAMS.update({'data_type': 'synthetic',
'path_in': PATH_DATA_SYNTH,
'dataset': SYNTH_DATASET_NAME,
'sub_dataset': SYNTH_SUB_DATASETS[0],
'path_in': SYNTH_PATH_APD,
'dataset': SYNTH_SUB_DATASETS[0],
'path_out': PATH_RESULTS})
SYNTH_PTN_RANGE = {'v0': range(2, 14, 1),
'v1': range(5, 20, 1),
'v2': range(10, 40, 2),
'v3': range(10, 40, 2),}
SYNTH_PTN_RANGE = {'atomicPatternDictionary_v0': range(2, 14, 1),
'atomicPatternDictionary_v1': range(5, 20, 1),
'atomicPatternDictionary_v2': range(10, 40, 2),
'atomicPatternDictionary_v3': range(10, 40, 2),}
# SYNTH_RESULTS_NAME = 'experiments_APD'
REAL_DATASET_NAME = '1000_images_improved_binary'
......@@ -97,9 +95,8 @@ REAL_SUB_DATASETS = [
]
REAL_PARAMS = DEFAULT_PARAMS.copy()
REAL_PARAMS.update({'data_type': 'real',
'path_in': PATH_DATA_REAL,
'dataset': REAL_DATASET_NAME,
'sub_dataset': REAL_SUB_DATASETS[0],
'path_in': os.path.join(PATH_DATA_REAL, REAL_DATASET_NAME),
'dataset': REAL_SUB_DATASETS[0],
'path_out': PATH_RESULTS,
'max_iter': 50,
'nb_runs': 10})
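The parameter dictionaries are flattened the same way for both data types: 'path_in' now points at the dataset root (the former 'dataset' name is folded into the path) and the former 'sub_dataset' value moves under the 'dataset' key. A hedged illustration with the synthetic defaults:

    # SYNTH_PARAMS['path_in'] -> .../synthetic_data/atomicPatternDictionary_v0
    # SYNTH_PARAMS['dataset'] -> 'datasetBinary_raw'   (was 'sub_dataset')
    # the old dataset name stays recoverable via
    # os.path.basename(SYNTH_PARAMS['path_in'])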
......@@ -121,12 +118,9 @@ def create_args_parser(dict_params):
default=dict_params['path_out'])
parser.add_argument('-n', '--name', type=str, required=False,
help='specific name', default=None)
parser.add_argument('--dataset', type=str, required=False,
parser.add_argument('--dataset', type=str, required=False, # nargs='+',
help='name of dataset to be used',
default=dict_params['dataset'])
parser.add_argument('--sub_dataset', type=str, required=False, # nargs='+',
help='path to the input image annotation',
default=dict_params['sub_dataset'])
parser.add_argument('--nb_jobs', type=int, required=False, default=NB_THREADS,
help='number of processes running in parallel')
parser.add_argument('--methods', type=int, required=False, nargs='+', default=None,
......@@ -145,7 +139,7 @@ def parse_arg_params(parser):
args = {k: args[k] for k in args if args[k] is not None}
for n in (k for k in args if 'path' in k and args[k] is not None):
args[n] = os.path.abspath(os.path.expanduser(args[n]))
assert os.path.exists(args[n])
assert os.path.exists(args[n]), '%s' % args[n]
return args
......@@ -157,11 +151,9 @@ def parse_params(default_params):
return params
# TODO: add option to estimate over reduced number of samples
class ExperimentAPD(tl_expt.Experiment):
"""
main_real class for APD experiments State-of-the-Art and ALPE
main class for APD experiments State-of-the-Art and ALPE
"""
def __init__(self, dict_params):
......@@ -171,8 +163,8 @@ class ExperimentAPD(tl_expt.Experiment):
"""
if not 'name' in dict_params:
dict_params['name'] = '{}_{}_{}'.format(dict_params['data_type'],
dict_params['dataset'],
dict_params['sub_dataset'])
os.path.basename(dict_params['path_in']),
dict_params['dataset'])
if not os.path.exists(dict_params['path_out']):
os.mkdir(dict_params['path_out'])
super(ExperimentAPD, self).__init__(dict_params)
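With the flattened parameters, the auto-generated experiment name keeps all three components by recovering the dataset folder from 'path_in'. A minimal sketch with illustrative values:

    import os
    dict_params = {'data_type': 'synthetic',
                   'path_in': '/data/atomicPatternDictionary_v0',
                   'dataset': 'datasetBinary_raw'}
    name = '{}_{}_{}'.format(dict_params['data_type'],
                             os.path.basename(dict_params['path_in']),
                             dict_params['dataset'])
    # -> 'synthetic_atomicPatternDictionary_v0_datasetBinary_raw'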
......@@ -190,9 +182,8 @@ class ExperimentAPD(tl_expt.Experiment):
:param params: {str: ...}, parameter settings
"""
path_data = os.path.join(self.params.get('path_in'), self.params.get('dataset'))
self.gt_atlas = gen_data.dataset_create_atlas(path_data)
gt_encoding = gen_data.dataset_load_weights(path_data)
self.gt_atlas = gen_data.dataset_create_atlas(self.params.get('path_in'))
gt_encoding = gen_data.dataset_load_weights(self.params.get('path_in'))
self.gt_img_rct = ptn_dict.reconstruct_samples(self.gt_atlas, gt_encoding)
def _load_data(self, gt=True):
......@@ -204,14 +195,13 @@ class ExperimentAPD(tl_expt.Experiment):
if gt:
self._load_data_ground_truth()
self._load_images()
self.imgs = [im.astype(np.uint8) for im in self.imgs]
def _load_images(self):
""" load image data """
path_data = os.path.join(self.params.get('path_in'),
self.params.get('dataset'))
self.imgs, self._im_names = gen_data.dataset_load_images(path_data,
self.params.get('sub_dataset'))
self.imgs, self._im_names = gen_data.dataset_load_images(path_data)
self.imgs = [im.astype(np.uint8) for im in self.imgs]
def run(self, gt=True, iter_var='case', iter_vals=range(1)):
""" the main_real procedure that load, perform and evaluete experiment
......@@ -276,7 +266,7 @@ class ExperimentAPD(tl_expt.Experiment):
path_csv = os.path.join(self.params.get('path_exp'), n_csv)
if not hasattr(self, '_im_names'):
self._im_names = map(str, range(self.w_bins.shape[0]))
df = pd.DataFrame(data=self.w_bins, index=self._im_names)
df = pd.DataFrame(data=self.w_bins, index=self._im_names[:len(self.w_bins)])
df.columns = ['ptn {}'.format(lb + 1) for lb in df.columns]
df.index.name = 'image'
df.to_csv(path_csv)
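The new [:len(self.w_bins)] slice keeps the CSV index aligned with the encoding when fewer samples were encoded than images loaded (see the 'nb_samples' handling further below). A minimal pandas sketch with synthetic data:

    import numpy as np
    import pandas as pd
    w_bins = np.random.randint(0, 2, (3, 4))    # encodings for 3 of 10 images
    im_names = ['im_%i' % i for i in range(10)]
    df = pd.DataFrame(data=w_bins, index=im_names[:len(w_bins)])
    df.columns = ['ptn {}'.format(lb + 1) for lb in df.columns]
    df.index.name = 'image'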
......@@ -299,7 +289,7 @@ class ExperimentAPD(tl_expt.Experiment):
if hasattr(self, 'gt_img_rct') and img_rct is not None:
# img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
# img_rct = self._binarize_img_reconstruction(img_rct)
diff = np.asarray(self.gt_img_rct) - np.asarray(img_rct)
diff = np.asarray(self.gt_img_rct[:len(img_rct)]) - np.asarray(img_rct)
stat['reconstruct_diff'] = \
np.sum(abs(diff)) / np.prod(np.asarray(self.gt_img_rct).shape)
return stat
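The same truncation guards the statistic: the ground-truth reconstructions are cut to the number of actually reconstructed images, and the absolute difference is normalised by the total pixel count. A numpy sketch with synthetic shapes (note the denominator still uses the full ground-truth shape, as in the code above):

    import numpy as np
    gt_img_rct = np.random.randint(0, 2, (5, 8, 8))  # 5 GT reconstructions
    img_rct = gt_img_rct[:3].copy()                  # only 3 were reconstructed
    diff = np.asarray(gt_img_rct[:len(img_rct)]) - np.asarray(img_rct)
    reconstruct_diff = np.sum(abs(diff)) / float(np.prod(gt_img_rct.shape))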
......@@ -344,7 +334,7 @@ class ExperimentAPD_mp(ExperimentAPD):
path_data = os.path.join(self.params.get('path_in'),
self.params.get('dataset'))
self.imgs, self._im_names = gen_data.dataset_load_images(path_data,
self.params.get('sub_dataset'), nb_jobs=self.nb_jobs)
nb_jobs=self.nb_jobs)
def _warp_perform_once(self, v):
self.params[self.iter_var_name] = v
......
#!/bin/bash
# STATE-OF-THE-ART methods
# python run_experiment_apd_sta.py \
# -in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v0 \
# -out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
# OUR method
python run_experiment_apd_apdl.py \
-in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v0 \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APDL_synth
python run_experiment_apd_apdl.py \
-in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v1 \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APDL_synth
python run_experiment_apd_apdl.py \
-in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v2 \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APDL_synth
python run_experiment_apd_apdl.py \
-in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v3 \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APDL_synth
\ No newline at end of file
......@@ -142,7 +142,7 @@ def load_segmentation(dict_params, img_name):
:return: np.array<height, width>
"""
path_img = os.path.join(dict_params['path_in'], dict_params['dataset'],
dict_params['sub_dataset'], img_name + '.png')
img_name + '.png')
img = np.array(Image.open(path_img))
img /= img.max()
return img
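With 'sub_dataset' gone, the segmentation is looked up directly under the dataset root (paths hypothetical):

    # <path_in>/<dataset>/<img_name>.png
    # e.g. /data/1000_images_improved_binary/type_1_segm_reg/img_001.png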
......
......@@ -13,6 +13,7 @@ Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
# to suppress all visu, has to be on the beginning
import os
import sys
import time
import traceback
import logging
......@@ -23,7 +24,9 @@ matplotlib.use('Agg')
import numpy as np
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import tqdm
sys.path.append(os.path.abspath(os.path.join('..','..'))) # Add path to root
import dataset_utils as gen_data
import dictionary_learning as dl
import pattern_disctionary as ptn_dict
......@@ -36,7 +39,7 @@ NB_THREADS = experiment_apd.NB_THREADS
PATH_OUTPUT = experiment_apd.PATH_OUTPUT
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE[experiment_apd.SYNTH_DATASET_VERSION]
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE
REAL_PARAMS = experiment_apd.REAL_PARAMS
......@@ -74,18 +77,18 @@ def test_simple_show_case():
# logger.debug(uc)
def experiment_pipeline_alpe_showcase(path_in=experiment_apd.SYNTH_PATH_APD,
path_out=PATH_OUTPUT):
def experiment_pipeline_alpe_showcase(path_out=PATH_OUTPUT):
""" an simple show case to prove that the particular steps are computed
:param path_in: str
:param path_out: str
:return:
"""
atlas = gen_data.dataset_create_atlas(path_in)
atlas = gen_data.dataset_create_atlas(experiment_apd.SYNTH_PATH_APD)
# plt.imshow(atlas)
imgs, _ = gen_data.dataset_load_images(path_in, gen_data.NAME_DATASET)
path_in = os.path.join(experiment_apd.SYNTH_PATH_APD, gen_data.NAME_DATASET)
imgs, _ = gen_data.dataset_load_images(path_in)
# imgs = gen_data.dataset_load_images('datasetBinary_defNoise',
# path_base=SYNTH_PATH_APD)
......@@ -130,14 +133,17 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
p = self.params.copy()
p[self.iter_var_name] = v
logger.debug('PARAMS: %s', repr(p))
init_atlas = self._init_atlas(p['nb_lbs'], p['init_tp'])
init_atlas = self._init_atlas(p['nb_labels'], p['init_tp'])
# prefix = 'expt_{}'.format(p['init_tp'])
path_out = os.path.join(p['path_exp'],
'log_{}_{}'.format(self.iter_var_name, v))
if not os.path.exists(path_out):
os.mkdir(path_out)
if isinstance(self.params['nb_samples'], float):
self.params['nb_samples'] = int(len(self.imgs) * self.params['nb_samples'])
try:
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(self.imgs,
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(
self.imgs[:self.params['nb_samples']],
init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
gc_coef=p['gc_regul'], max_iter=p['max_iter'],
ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
......@@ -146,9 +152,10 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
logger.error('FAILED, no atlas estimated!')
logger.error(traceback.format_exc())
atlas = np.zeros_like(self.imgs[0])
w_bins = np.zeros((len(self.imgs), 1))
w_bins = np.zeros((len(self.imgs), 0))
assert atlas.max() == w_bins.shape[1]
return atlas, w_bins
self.atlas = atlas
self.w_bins = w_bins
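'nb_samples' may now be given as a fraction of the dataset and is resolved to an absolute count before learning. A minimal sketch:

    imgs = range(100)      # stand-in for the loaded images
    nb_samples = 0.5       # as set e.g. in experiments_test()
    if isinstance(nb_samples, float):
        nb_samples = int(len(imgs) * nb_samples)   # -> 50
    subset = imgs[:nb_samples]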
def _perform_once(self, v):
""" perform single experiment
......@@ -157,7 +164,7 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
:return: {str: ...}
"""
logger.info('perform single experiment...')
self.atlas, self.w_bins = self._estimate_atlas(v)
self._estimate_atlas(v)
logger.debug('atlas of size %s and labels %s', repr(self.atlas.shape),
repr(np.unique(self.atlas).tolist()))
logger.debug('weights of size %s and summing %s', repr(self.w_bins.shape),
......@@ -165,8 +172,8 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
name_posix = '_{}_{}'.format(self.iter_var_name, v)
self._export_atlas(name_posix)
self._export_coding(name_posix)
self.img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
stat = self._compute_statistic_gt(self.atlas, self.img_rct)
img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
stat = self._compute_statistic_gt(img_rct)
stat[self.iter_var_name] = v
return stat
......@@ -194,8 +201,7 @@ def experiments_test(dict_params=experiment_apd.SYNTH_PARAMS):
expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_synthetic(dataset=None, dict_params=SYNTH_PARAMS,
sub_datasets=SYNTH_SUB_DATASETS, ptn_range=SYNTH_PTN_RANGE):
def experiments_synthetic(params=SYNTH_PARAMS):
""" run all experiments
:param params: {str: value}
......@@ -205,60 +211,64 @@ def experiments_synthetic(dataset=None, dict_params=SYNTH_PARAMS,
"""
arg_params = experiment_apd.parse_params(dict_params)
arg_params = experiment_apd.parse_params(params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
nb_jobs = arg_params['nb_jobs']
params = copy.deepcopy(dict_params)
if isinstance(dataset, str):
params.update({'dataset': dataset})
params.update(arg_params)
l_params = [params]
l_params = experiment_apd.extend_list_params(l_params, 'sub_dataset', sub_datasets)
# l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
# l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_list_params(l_params, 'nb_lbs', ptn_range)
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
l_params = experiment_apd.extend_list_params(l_params, 'dataset', SYNTH_SUB_DATASETS)
l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
ptn_range = SYNTH_PTN_RANGE[os.path.basename(params['path_in'])]
l_params = experiment_apd.extend_list_params(l_params, 'nb_labels', ptn_range)
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul',
[0., 1e-6, 1e-3, 1e-1, 1e0])
logger.debug('list params: %i', len(l_params))
tqdm_bar = tqdm.tqdm(total=len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(iter_var='case', iter_vals=range(params['nb_runs']))
tqdm_bar.update(1)
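extend_list_params is used above as a grid expander: each call multiplies the list of parameter dicts by the given value range. The helper itself is not shown in this diff; a re-implementation consistent with its usage would be:

    import copy
    def extend_list_params(l_params, name, values):
        """ expand every dict in the list by each value of the given key """
        l_new = []
        for params in l_params:
            for v in values:
                p = copy.deepcopy(params)
                p[name] = v
                l_new.append(p)
        return l_new

So 4 sub-datasets x 2 initialisations x 2 split flags x len(ptn_range) label counts x 5 regularisations parameter dicts are generated, each run as a separate experiment.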
def experiments_real(dict_params=REAL_PARAMS):
def experiments_real(params=REAL_PARAMS):
""" run all experiments
:param dict_params: {str: value}
:param params: {str: value}
"""
arg_params = experiment_apd.parse_params(dict_params)
arg_params = experiment_apd.parse_params(params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
nb_jobs = arg_params['nb_jobs']
dict_params.update(arg_params)
params.update(arg_params)
l_params = [copy.deepcopy(dict_params)]
l_params = [copy.deepcopy(params)]
# l_params = experiment_apd.extend_list_params(l_params, 'init_tp', ['msc', 'rnd'])
# l_params = experiment_apd.extend_list_params(l_params, 'ptn_split', [True, False])
l_params = experiment_apd.extend_list_params(l_params, 'gc_regul',
[0., 1e-9, 1e-6, 1e-3, 1e-2, 1e-1])
# l_params = experiment_apd.extend_list_params(l_params, 'nb_lbs',
# l_params = experiment_apd.extend_list_params(l_params, 'nb_labels',
# [5, 9, 12, 15, 20, 25, 30, 40])
logger.debug('list params: %i', len(l_params))
tqdm_bar = tqdm.tqdm(total=len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
# exp.run(gt=False, iter_var='case', iter_values=range(params['nb_runs']))
exp.run(gt=False, iter_var='nb_lbs',
exp.run(gt=False, iter_var='nb_labels',
iter_vals=[9, 12, 15, 20, 25, 30])
tqdm_bar.update(1)
def main():
......@@ -272,7 +282,7 @@ def main():
experiments_synthetic()
experiments_real()
# experiments_real()
logger.info('DONE')
......
......@@ -5,8 +5,8 @@ Example run:
>> nohup python experiments_sta.py > ~/Medical-temp/experiments_APD-sta/nohup.log &
>> python run_experiment_apd_sta.py \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta \
--dataset atomicPatternDictionary_v1
-in /datagrid/Medical/microscopy/drosophila/synthetic_data/atomicPatternDictionary_v1 \
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD-sta
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
......@@ -21,8 +21,9 @@ matplotlib.use('Agg')
import numpy as np
from sklearn import decomposition
from skimage import segmentation
import tqdm
sys.path.append(os.path.abspath(os.path.join('..','..'))) # Add path to root
import experiment_apd
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weight
......@@ -31,10 +32,10 @@ logger = logging.getLogger(__name__)
SYNTH_PARAMS = experiment_apd.SYNTH_PARAMS
SYNTH_SUB_DATASETS = experiment_apd.SYNTH_SUB_DATASETS
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE[experiment_apd.SYNTH_DATASET_VERSION]
SYNTH_PTN_RANGE = experiment_apd.SYNTH_PTN_RANGE
class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
# experiment_apd.ExperimentAPD_mp
class ExperimentLinearCombine(experiment_apd.ExperimentAPD_mp):
"""
State-of-the-Art methods that are based on Linear Combination
"""
......@@ -60,8 +61,7 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
atlas_ptns = self.components.reshape((-1, ) + self.imgs[0].shape)
rct_vec = np.dot(self.fit_result, self.components)
rct_imgs = rct_vec.reshape(np.asarray(self.imgs).shape)
return atlas_ptns, rct_imgs
return atlas_ptns, rct_vec
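Returning the raw reconstruction vector keeps shapes explicit; the reshape back to images is left to the caller (see the commented line in _perform_once below). A shape walk-through with illustrative sizes:

    import numpy as np
    n, k, h, w = 5, 3, 8, 8                      # images, components, size
    fit_result = np.random.rand(n, k)            # per-image weights
    components = np.random.rand(k, h * w)        # flattened patterns
    atlas_ptns = components.reshape((-1, h, w))  # (k, h, w)
    rct_vec = np.dot(fit_result, components)     # (n, h*w), one row per image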
def estim_atlas_as_argmax(self, atlas_ptns):
"""
......@@ -122,12 +122,15 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
"""
self.params[self.iter_var_name] = v
name_posix = '_{}_{}'.format(self.iter_var_name, v)
imgs_vec = np.array(map(np.ravel, self.imgs))
atlas_ptns, img_rct = self._perform_linear_combination(imgs_vec)
if isinstance(self.params['nb_samples'], float):
self.params['nb_samples'] = int(len(self.imgs) * self.params['nb_samples'])
imgs_vec = np.array(map(np.ravel, self.imgs[:self.params['nb_samples']]))
atlas_ptns, rct_vec = self._perform_linear_combination(imgs_vec)
# img_rct = rct_vec.reshape(np.asarray(self.imgs[:self.params['nb_samples']]).shape)
self._convert_patterns_to_atlas(atlas_ptns)
self._export_atlas(name_posix)
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, self.atlas)
for img in self.imgs]
for img in self.imgs[:self.params['nb_samples']]]
self.w_bins = np.array(w_bins)
self._export_coding(name_posix)
img_rct = ptn_dict.reconstruct_samples(self.atlas, self.w_bins)
......@@ -136,31 +139,31 @@ class ExperimentLinearComb(experiment_apd.ExperimentAPD_mp):
return stat
class ExperimentFastICA(ExperimentLinearComb):
class ExperimentFastICA(ExperimentLinearCombine):
"""
http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html
"""
def _estimate_linear_combination(self, imgs_vec):
self.estimator = decomposition.FastICA(n_components=self.params.get('nb_lbs'),
self.estimator = decomposition.FastICA(n_components=self.params.get('nb_labels'),
max_iter=self.params.get('max_iter'), whiten=True)
self.fit_result = self.estimator.fit_transform(imgs_vec)
self.components = self.estimator.mixing_.T
class ExperimentSparsePCA(ExperimentLinearComb):
class ExperimentSparsePCA(ExperimentLinearCombine):
"""
http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.SparsePCA.html
"""
def _estimate_linear_combination(self, imgs_vec):
self.estimator = decomposition.SparsePCA(n_components=self.params.get('nb_lbs'),
self.estimator = decomposition.SparsePCA(n_components=self.params.get('nb_labels'),
max_iter=self.params.get('max_iter'))#, n_jobs=-1
self.fit_result = self.estimator.fit_transform(imgs_vec)
self.components = self.estimator.components_
class ExperimentDictLearn(ExperimentLinearComb):
class ExperimentDictLearn(ExperimentLinearCombine):
"""
http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.DictionaryLearning.html
"""
......@@ -168,7 +171,7 @@ class ExperimentDictLearn(ExperimentLinearComb):
def _estimate_linear_combination(self, imgs_vec):
self.estimator = decomposition.DictionaryLearning(fit_algorithm='lars',
transform_algorithm='omp', split_sign=False,
n_components=self.params.get('nb_lbs'),
n_components=self.params.get('nb_labels'),
max_iter=self.params.get('max_iter'))
self.fit_result = self.estimator.fit_transform(imgs_vec)
self.components = self.estimator.components_
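All three estimators follow the same fit_transform/components pattern with the renamed 'nb_labels' parameter; only the source of the components differs (mixing_.T for FastICA, components_ otherwise). A minimal standalone run on synthetic data (sizes illustrative, old-style sklearn keywords as used above):

    import numpy as np
    from sklearn import decomposition
    imgs_vec = np.random.rand(20, 64)            # 20 flattened 8x8 images
    estimator = decomposition.FastICA(n_components=4, max_iter=100, whiten=True)
    fit_result = estimator.fit_transform(imgs_vec)   # (20, 4) weights
    components = estimator.mixing_.T                 # (4, 64) patterns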
......@@ -187,6 +190,7 @@ def experiments_test(params=SYNTH_PARAMS):
logging.basicConfig(level=logging.DEBUG)
params['nb_runs'] = 2
params['nb_samples'] = 0.5
for n, cls_expt in METHODS.iteritems():
logging.info('testing %s by %s', n, cls_expt.__name__)
......@@ -194,30 +198,34 @@ def experiments_test(params=SYNTH_PARAMS):
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_synthetic(dict_params=SYNTH_PARAMS, ptn_range=SYNTH_PTN_RANGE):
def experiments_synthetic(params=SYNTH_PARAMS):
""" run all experiments
:param params: {str: value}
"""
arg_params = experiment_apd.parse_params(dict_params)
arg_params = experiment_apd.parse_params(params)
logging.info('PARAMS: \n%s', '\n'.join(['"{}": \n\t {}'.format(k, v)
for k, v in arg_params.iteritems()]))
dict_params.update(arg_params)
dict_params['max_iter'] = 9999
l_params = [dict_params]
l_params = experiment_apd.extend_list_params(l_params, 'sub_dataset',
params.update(arg_params)
params['max_iter'] = 9999
l_params = [params]
l_params = experiment_apd.extend_list_params(l_params, 'nb_samples',
np.linspace(0.1, 1, 10).tolist())
l_params = experiment_apd.extend_list_params(l_params, 'dataset',
SYNTH_SUB_DATASETS)
if not 'methods' in dict_params:
dict_params['methods'] = METHODS.keys()
if not 'methods' in params:
params['methods'] = METHODS.keys()
ptn_range = SYNTH_PTN_RANGE[os.path.basename(params['path_in'])]
for m in dict_params['methods']:
for m in params['methods']:
cls_expt = METHODS[m]
tqdm_bar = tqdm.tqdm(total=len(l_params))
for params in l_params:
expt = cls_expt(params)
expt.run(iter_var='nb_lbs', iter_vals=ptn_range)
expt.run(iter_var='nb_labels', iter_vals=ptn_range)
tqdm_bar.update(1)
def main():
......
......@@ -57,7 +57,7 @@ DEFAULT_PARAMS = {
'sp_size': 20,
'sp_regul': 0.15,
'gc_regul': 0.9, # 1.2
'nb_lbs': 3,
'nb_labels': 3,
'fts': {'clr': ['median', 'std', 'eng']}, # {'clr': ['mean', 'std', 'eng']}
'clr': 'rgb', # 'lab'
'visu': True,
......@@ -330,7 +330,7 @@ def segment_image(params, dict_paths, p_im):
logger.debug('img values range: %f - %f', np.min(img), np.max(img))
d_debug = dict() if b_debug else None
t = time.time()
seg_raw = segm.pipe_clr2d_spx_fts_gmm_gc(img, nb_cls=params['nb_lbs'],
seg_raw = segm.pipe_clr2d_spx_fts_gmm_gc(img, nb_cls=params['nb_labels'],
color=params['clr'], sp_size=params['sp_size'],
sp_reg=params['sp_regul'], gc_reg=params['gc_regul'],
ls_fts=params['fts'], gc_tp='w_edge', d_debug=d_debug)
......
......@@ -99,7 +99,7 @@ def recompute_encoding(atlas, path_csv):
path_expt = os.path.dirname(path_csv)
config = load_config_json(path_expt)
path_in = os.path.join(config.get('path_in'), config.get('dataset'))
imgs, im_names = gen_data.dataset_load_images(path_in, config.get('sub_dataset'))
imgs, im_names = gen_data.dataset_load_images(path_in)
weights = [ptn_weight.weights_image_atlas_overlap_major(img, atlas) for img in imgs]
df = pd.DataFrame(data=np.array(weights), index=im_names)
df.columns = ['ptn {}'.format(lb + 1) for lb in df.columns]
......
......@@ -34,8 +34,7 @@ IMAGE_PATTERN = '*'
DEFAULT_PARAMS = {
# 'computer': os.uname(),
'path_in': os.path.join(DEFAULT_PATH_DATA, 'RESULTS'),
'dataset': REAL_DATASET_NAME,
'path_in': os.path.join(DEFAULT_PATH_DATA, 'RESULTS', REAL_DATASET_NAME),
'path_out': os.path.join(DEFAULT_PATH_DATA, 'TEMPORARY'),
# 'binary': ['fix', 'otsu', 'adapt'],
'binary': ['3cls'],
......@@ -175,14 +174,15 @@ def binarize_all(params=DEFAULT_PARAMS, im_pattern=IMAGE_PATTERN, nb_jobs=NB_THR
:return:
"""
imgs, names = gen_data.dataset_load_images(params['path_in'], params['dataset'],
im_pattern=im_pattern, nb_jobs=nb_jobs)
imgs, names = gen_data.dataset_load_images(params['path_in'], im_pattern=im_pattern,
nb_jobs=nb_jobs)
logger.info('loaded {} images of size {}'.format(len(imgs), imgs[0].shape))
imgs = extend_images(imgs)
imgs = crop_images(imgs, 1e-3)
gc.collect(), time.sleep(1)
path_export = os.path.join(params['path_out'], params['dataset'] + BINARY_POSIX)
path_export = os.path.join(params['path_out'],
os.path.basename(params['path_in']) + BINARY_POSIX)
if not os.path.exists(path_export):
os.mkdir(path_export)
logger.debug('exporting path: %s', path_export)
......@@ -236,7 +236,8 @@ def main():
params = DEFAULT_PARAMS.copy()
datasets = ['type_{}_segm_reg'.format(i) for i in range(1, 5)]
for ds in datasets:
params['dataset'] = ds
params['path_in'] = os.path.join(DEFAULT_PATH_DATA, 'RESULTS',
REAL_DATASET_NAME, ds)
binarize_all(params)
logger.info('DONE')
......