Commit 12c99fce by Jiri Borovec

refactoring

parent a6e875cf
"""
The main module for the Atomic Pattern Dictionary, joining the atlas estimation
and computing the encoding / weights
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
import skimage.segmentation as sk_image
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weight
import metric_similarity as sim_metric
import generate_dataset as gen_data
import dataset_utils as gen_data
logger = logging.getLogger(__name__)
DEFAULT_UNARY_BACKGROUND = 1
......@@ -56,24 +63,24 @@ DEFAULT_UNARY_BACKGROUND = 1
# return pott_sum_norm
def compute_relative_penaly_images_weights(l_imgs, weights):
def compute_relative_penaly_images_weights(imgs, weights):
""" compute the relative penalty for all pixel and cjsing each label
on that particular position
:param l_imgs: [np.array<w, h>]
:param imgs: [np.array<w, h>]
:param weights: np.array<nb_imgs, nb_lbs>
:return: np.array<w, h, nb_lbs>
"""
logger.info('compute unary cost from images and related weights')
# weightsIdx = ptn_weight.convert_weights_binary2indexes(weights)
nb_lbs = weights.shape[1] + 1
assert len(l_imgs) == weights.shape[0]
pott_sum = np.zeros(l_imgs[0].shape + (nb_lbs,))
assert len(imgs) == weights.shape[0]
pott_sum = np.zeros(imgs[0].shape + (nb_lbs,))
# extend the weights by background value 0
weights_ext = np.append(np.zeros((weights.shape[0], 1)), weights, axis=1)
# logger.debug(weights_ext)
imgs = np.array(l_imgs)
logger.debug('DIMS potts: %s, l_imgs %s, w_bin: %s', repr(pott_sum.shape),
imgs = np.array(imgs)
logger.debug('DIMS potts: %s, imgs %s, w_bin: %s', repr(pott_sum.shape),
repr(imgs.shape), repr(weights_ext.shape))
logger.debug('... walk over all pixels in each image')
for i in range(pott_sum.shape[0]):
......@@ -82,7 +89,7 @@ def compute_relative_penaly_images_weights(l_imgs, weights):
img_vals = np.repeat(imgs[:, i, j, np.newaxis],
weights_ext.shape[1], axis=1)
pott_sum[i, j] = np.sum(abs(weights_ext - img_vals), axis=0)
pott_sum_norm = pott_sum / float(len(l_imgs))
pott_sum_norm = pott_sum / float(len(imgs))
return pott_sum_norm
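As an aside, the per-pixel loop above can be collapsed with numpy broadcasting; a minimal equivalent sketch, assuming imgs is a list of equally sized 2D arrays and weights is the binary <nb_imgs, nb_lbs> matrix:

import numpy as np

def relative_penalty_vectorized(imgs, weights):
    imgs = np.asarray(imgs)                               # <nb_imgs, w, h>
    # prepend the zero background column, as weights_ext does above
    w_ext = np.hstack([np.zeros((weights.shape[0], 1)), weights])
    # broadcast to <nb_imgs, w, h, nb_lbs> and sum over the images
    diff = np.abs(w_ext[:, None, None, :] - imgs[..., None])
    return diff.sum(axis=0) / float(len(imgs))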
......@@ -133,12 +140,12 @@ def edges_in_image_plane(im_size):
return edges
def estimate_atlas_graphcut_simple(imgs, encodding, coef=1.):
def estimate_atlas_graphcut_simple(imgs, encoding, coef=1.):
""" run the graphcut to estimate atlas from computed unary terms
source: https://github.com/yujiali/pygco
:param imgs: [np.array<w, h>] list of input binary images
:param encodding: np.array<nb_imgs, nb_lbs> binary ptn selection
:param encoding: np.array<nb_imgs, nb_lbs> binary ptn selection
:param coef: coefficient for graphcut
:return:
"""
......@@ -146,7 +153,7 @@ def estimate_atlas_graphcut_simple(imgs, encodding, coef=1.):
# source: https://github.com/yujiali/pygco
from src.wrappers.GraphCut.pygco import cut_grid_graph_simple
labeling_sum = compute_positive_cost_images_weights(imgs, encodding)
labeling_sum = compute_positive_cost_images_weights(imgs, encoding)
unary_cost = np.array(-1 * labeling_sum, dtype=np.int32)
logger.debug('graph unaries potentials %s: \n %s', repr(unary_cost.shape),
repr(np.histogram(unary_cost, bins=10)))
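For reference, a hedged usage sketch of cut_grid_graph_simple with unary costs shaped like those built above; the signature and return shape are assumed from the linked pygco repository:

import numpy as np
from src.wrappers.GraphCut.pygco import cut_grid_graph_simple

# toy unary costs <h, w, nb_lbs> (int32) and a Potts pairwise term
unary = np.random.randint(0, 10, (32, 32, 3)).astype(np.int32)
pairwise = (1 - np.eye(3)).astype(np.int32)
labels = cut_grid_graph_simple(unary, pairwise, algorithm='expansion')
atlas = labels.reshape(unary.shape[:2])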
......@@ -167,7 +174,7 @@ def estimate_atlas_graphcut_general(imgs, encoding, coef=1., init_lbs=None):
source: https://github.com/yujiali/pygco
:param imgs: [np.array<w, h>] list of input binary images
:param encodding: np.array<nb_imgs, nb_lbs> binary ptn selection
:param encoding: np.array<nb_imgs, nb_lbs> binary ptn selection
:param coef: coefficient for graphcut
:param init_lbs: np.array<nb_seg, 1> init labeling
when None, it takes the arg min of the unary costs
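In other words, when init_lbs is None the initial labeling can be the per-pixel arg-min of the unary cost; a one-line sketch with illustrative variable names:

init_lbs = np.argmin(unary_cost, axis=-1).astype(np.int32)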
......@@ -210,7 +217,7 @@ def estimate_atlas_graphcut_general(imgs, encoding, coef=1., init_lbs=None):
def export_visualization_image(img, i, out_dir, prefix='debug', name='',
ration=None, labels=['', '']):
ration=None, labels=('', '')):
""" export visualisation as an image with some special desc.
:param img: np.array<w, h>
......@@ -232,7 +239,7 @@ def export_visualization_image(img, i, out_dir, prefix='debug', name='',
plt.close()
def export_visual_atlas(i, out_dir, atlas=None, weights=None, prefix='debug'):
def export_visual_atlas(i, out_dir, atlas=None, prefix='debug'):
""" export the atlas and/or weights to results directory
:param i: int, iteration to be shown in the img name
......@@ -285,7 +292,7 @@ def alpe_initialisation(imgs, init_atlas, init_weights, out_dir, out_prefix):
return atlas, w_bins
def alpe_update_weights(imgs, atlas, ovp_m=False):
def alpe_update_weights(imgs, atlas, overlap_major=False):
""" single iteration of the block coordinate descent algo
:param imgs: [np.array<w, h>]
......@@ -294,7 +301,7 @@ def alpe_update_weights(imgs, atlas, ovp_m=False):
"""
# update w_bins
logger.debug('... perform pattern Weights')
if ovp_m:
if overlap_major:
w_bins = [ptn_weight.weights_image_atlas_overlap_major(img, atlas)
for img in imgs]
else:
......@@ -305,7 +312,7 @@ def alpe_update_weights(imgs, atlas, ovp_m=False):
return np.array(w_bins)
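For intuition, a hedged sketch of what a majority-overlap weight update for a single image could look like (not the project's actual ptn_weight implementation):

import numpy as np

def weights_overlap_major_sketch(img, atlas):
    nb_lbs = int(atlas.max())
    w = np.zeros(nb_lbs, dtype=int)
    for lb in range(1, nb_lbs + 1):
        mask = (atlas == lb)
        # pattern lb is "on" if it covers the majority of its atlas region
        if mask.any() and img[mask].mean() > 0.5:
            w[lb - 1] = 1
    return w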
def alpe_repaire_atlas_weights(imgs, atlas, w_bins, lb_max):
def alpe_repaire_atlas_weights(imgs, atlas, w_bins, label_max):
"""
:param imgs: [np.array<w, h>]
......@@ -315,7 +322,8 @@ def alpe_repaire_atlas_weights(imgs, atlas, w_bins, lb_max):
"""
logger.debug('... perform repairing')
# reinit empty
atlas, w_bins = ptn_dict.reinit_atlas_likely_patterns(imgs, w_bins, atlas, lb_max)
atlas, w_bins = ptn_dict.reinit_atlas_likely_patterns(imgs, w_bins, atlas,
label_max)
return atlas, w_bins
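The repair step above hinges on spotting unused patterns first; a minimal sketch of that detection, assuming w_bins is a binary <nb_imgs, nb_lbs> matrix (not the actual ptn_dict helper):

import numpy as np

def find_empty_patterns(w_bins):
    used = np.asarray(w_bins).sum(axis=0) > 0   # used in at least one image
    return [lb for lb, u in enumerate(used, start=1) if not u]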
......@@ -350,9 +358,10 @@ def alpe_update_atlas(imgs, atlas, w_bins, lb_max, gc_coef, gc_reinit, ptn_split
def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=None,
gc_coef=0.0, thr_step_diff=0.0, max_iter=99, gc_reinit=True,
ptn_split=True, w_ovp_m=False, out_prefix='debug', out_dir=''):
""" the experiments_synthetic_single_run pipeline for block coordinate descent algo with graphcut
gc_coef=0.0, thr_step_diff=0.0, max_iter=99, gc_reinit=True,
ptn_split=True, overlap_major=False, out_prefix='debug', out_dir=''):
""" the experiments_synthetic_single_run pipeline for block coordinate descent
algo with graphcut...
:param imgs: [np.array<w, h>]
:param init_atlas: np.array<w, h>
......@@ -378,7 +387,7 @@ def alpe_pipe_atlas_learning_ptn_weights(imgs, init_atlas=None, init_weights=Non
if len(np.unique(atlas)) == 1:
logger.warning('the atlas does not contain any label... %s',
repr(np.unique(atlas)))
w_bins = alpe_update_weights(imgs, atlas, w_ovp_m)
w_bins = alpe_update_weights(imgs, atlas, overlap_major)
# plt.subplot(221), plt.imshow(atlas, interpolation='nearest')
# plt.subplot(222), plt.imshow(w_bins, aspect='auto')
atlas, w_bins = alpe_repaire_atlas_weights(imgs, atlas, w_bins, lb_max)
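For orientation, the iteration shown above sits inside this overall scheme (a sketch using the helpers defined earlier; the exact stopping rule on thr_step_diff is an assumption):

atlas, w_bins = alpe_initialisation(imgs, init_atlas, init_weights,
                                    out_dir, out_prefix)
for it in range(max_iter):
    w_bins = alpe_update_weights(imgs, atlas, overlap_major)
    atlas, w_bins = alpe_repaire_atlas_weights(imgs, atlas, w_bins, lb_max)
    atlas_new = alpe_update_atlas(imgs, atlas, w_bins, lb_max,
                                  gc_coef, gc_reinit, ptn_split)
    step_diff = np.mean(atlas != atlas_new)  # fraction of changed pixels
    atlas = atlas_new
    if step_diff <= thr_step_diff:
        break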
......
"""
run experiments with state-of-the-art methods
the base class for all Atomic Pattern Dictionary methods
such as the state-of-the-art and our newly developed ones
Example run:
>> nohup python experiments_sta.py > ~/Medical-temp/experiments_APD-sta/nohup.log &
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
# to suppress all visualisations, this has to be at the beginning
......@@ -15,22 +17,23 @@ import logging
import copy_reg
import types
import argparse
import numpy as np
import pandas as pd
import multiprocessing as mproc
from sklearn import metrics
from sklearn import decomposition
import generate_dataset as gen_data
import dataset_utils as gen_data
import pattern_disctionary as ptn_dict
import src.own_utils.tool_experiments as tl_expt
logger = logging.getLogger(__name__)
# REQUIRED FOR MPROC POOL
# ISSUE: cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
# http://stackoverflow.com/questions/25156768/cant-pickle-type-instancemethod-using-pythons-multiprocessing-pool-apply-a
def _reduce_method(m):
# REQUIRED FOR MPROC POOL
# ISSUE: cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
# http://stackoverflow.com/questions/25156768/cant-pickle-type-instancemethod-using-pythons-multiprocessing-pool-apply-a
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
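For context, the standard recipe from the linked Stack Overflow answer completes this helper roughly as follows (the elided branch and the copy_reg.pickle registration are assumptions based on that link):

def _reduce_method(m):
    if m.im_self is None:    # unbound method
        return getattr, (m.im_class, m.im_func.func_name)
    else:                    # bound method
        return getattr, (m.im_self, m.im_func.func_name)

copy_reg.pickle(types.MethodType, _reduce_method)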
......@@ -44,9 +47,9 @@ if b_cmp:
PATH_DATA_REAL = '/datagrid/Medical/microscopy/drosophila/TEMPORARY'
PATH_OUTPUT = '/datagrid/Medical/microscopy/drosophila/TEMPORARY'
else:
PATH_DATA_SYNTH = '/home/jirka/TEMP/APD_synthetic_data'
PATH_DATA_SYNTH = '/home/b_jirka/TEMP/APD_synthetic_data'
PATH_DATA_REAL = ''
PATH_OUTPUT = '/home/jirka/TEMP'
PATH_OUTPUT = '/home/b_jirka/TEMP'
NB_THREADS = int(mproc.cpu_count() * .9)
PATH_RESULTS = os.path.join(PATH_OUTPUT, 'experiments_APD_temp')
......@@ -103,7 +106,7 @@ REAL_PARAMS.update({'data_type': 'real',
def create_args_parser(dict_params):
""" create simple arg parser with default values (input, output, dataset)
:param name: str, name of script
:param dict_params: {str: ...}
:return: object argparse<in, out, ant, name>
"""
parser = argparse.ArgumentParser()
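A hedged sketch of how the defaults in dict_params could be wired into the parser (the concrete arguments live in the elided lines below; the flag names here are illustrative):

parser.add_argument('-in', '--path_in', type=str,
                    default=dict_params.get('path_in'),
                    help='path to the input dataset')
parser.add_argument('-out', '--path_out', type=str,
                    default=dict_params.get('path_out'),
                    help='path to the output directory')
return parser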
......@@ -172,9 +175,9 @@ class ExperimentAPD(tl_expt.Experiment):
:param params: {str: ...}, parameter settings
"""
p_data = os.path.join(self.params.get('path_in'), self.params.get('dataset'))
self.gt_atlas = gen_data.dataset_create_atlas(path_base=p_data)
gt_encoding = gen_data.dataset_load_weights(path_base=p_data)
path_data = os.path.join(self.params.get('path_in'), self.params.get('dataset'))
self.gt_atlas = gen_data.dataset_create_atlas(path_data)
gt_encoding = gen_data.dataset_load_weights(path_data)
self.gt_img_rct = ptn_dict.reconstruct_samples(self.gt_atlas, gt_encoding)
def _load_data(self, gt=True):
......@@ -192,8 +195,8 @@ class ExperimentAPD(tl_expt.Experiment):
""" load image data """
path_data = os.path.join(self.params.get('path_in'),
self.params.get('dataset'))
self.imgs, self._im_names = gen_data.dataset_load_images(
self.params.get('sub_dataset'), path_data)
self.imgs, self._im_names = gen_data.dataset_load_images(path_data,
self.params.get('sub_dataset'))
def run(self, gt=True, iter_var='case', iter_vals=range(1)):
""" the main_real procedure that load, perform and evaluete experiment
......@@ -317,9 +320,8 @@ class ExperimentAPD_mp(ExperimentAPD):
""" load image data """
path_data = os.path.join(self.params.get('path_in'),
self.params.get('dataset'))
self.imgs, self._im_names = gen_data.dataset_load_images(
self.params.get('sub_dataset'),
path_data, nb_jobs=self.nb_jobs)
self.imgs, self._im_names = gen_data.dataset_load_images(path_data,
self.params.get('sub_dataset'), nb_jobs=self.nb_jobs)
def _perform_once(self, v):
""" perform single experiment
......
"""
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import logging
import numpy as np
from sklearn import metrics
logger = logging.getLogger(__name__)
......
"""
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import logging
from scipy import ndimage
from skimage import morphology
import numpy as np
import matplotlib.pyplot as plt
import generate_dataset as data
import dataset_utils as data
import pattern_weights as ptn_weight
logger = logging.getLogger(__name__)
......@@ -138,6 +146,7 @@ def insert_new_pattern(imgs, imgs_rc, atlas, lb):
def reinit_atlas_likely_patterns(imgs, w_bins, atlas, label_max=None):
""" walk and find all all free labels and try to reinit them by new patterns
:param label_max:
:param imgs: [np.array<w, h>] list of input images
:param w_bins: np.array<nb_imgs, nb_lbs>
:param atlas: np.array<w, h>
......
import generate_dataset as data
from skimage import morphology
"""
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
......
"""
script that takes the csv files with encoding by the proposed atlas
and does the back reconstruction of each image. As a sub-step it computes
the reconstruction erro to evaluate he parameters and export visualisation
the reconstruction error to evaluate the parameters and export visualisation
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import glob
import json
import logging
import traceback
import multiprocessing as mproc
import numpy as np
......@@ -20,6 +20,7 @@ import matplotlib.pyplot as plt
import pattern_weights as ptn_weight
logger = logging.getLogger(__name__)
RUN_DEBUG = False
VISUAL = True
......@@ -72,6 +73,7 @@ def compute_reconstruction(dict_params, path_out, img_atlas, weights, img_name):
""" reconstruct the segmentation from atlas and particular weights
and compute the reconstruction error
:param path_out: str
:param dict_params:
:param img_atlas: np.array<height, width>
:param weights: [<0,1>]
......@@ -269,7 +271,7 @@ def main(path_base=PATH_EXPERIMENTS):
process_experiment(path_expt)
if __name__ == '__main__':
def main():
logging.basicConfig(level=logging.INFO)
if RUN_DEBUG:
logging.basicConfig(level=logging.DEBUG)
......@@ -280,4 +282,8 @@ if __name__ == '__main__':
else:
main()
logger.info('DONE')
\ No newline at end of file
logger.info('DONE')
if __name__ == '__main__':
main()
......@@ -2,8 +2,9 @@
script that walks over all segmentations, computes some statistics
and then decides whether the segmentation is likely to be correct or not
"""
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import glob
......@@ -15,11 +16,10 @@ import multiprocessing as mproc
# to suppress all visualisations, this has to be at the beginning
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
from skimage import io, morphology
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
......@@ -68,16 +68,17 @@ def labels_ration(path_seg):
'r_cx': obj_bg}
def plot_histo_labels(d_hist, path_dir=''):
def plot_histo_labels(dict_hist, path_dir=''):
""" plot some simple histogram
:param d_hist: {int: float}
:param path_dir: str
:param dict_hist: {int: float}
"""
logger.info('plotting stat. results')
fig = plt.figure()
for lb in d_hist:
plt.plot(d_hist[lb], '+', label=str(lb))
plt.xlim([0, max(len(v) for v in d_hist.itervalues())])
for lb in dict_hist:
plt.plot(dict_hist[lb], '+', label=str(lb))
plt.xlim([0, max(len(v) for v in dict_hist.itervalues())])
plt.xlabel('image samples')
plt.ylabel('label cover')
plt.legend(loc=0)
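Hypothetical usage, with two labels and per-image coverage values:

plot_histo_labels({1: [0.2, 0.4, 0.3], 2: [0.1, 0.3, 0.2]}, path_dir='')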
......@@ -238,6 +239,8 @@ def filter_copy_visu(path_dir_seg=PATH_SEGM, path_dir_visu=PATH_VISU):
def main():
""" the main_real entry point """
logging.basicConfig(level=logging.DEBUG)
logger.info('running...')
# defaults run
segm_detect_fails()
filter_copy_visu()
......@@ -248,12 +251,9 @@ def main():
segm_detect_fails(p_dir_seg)
filter_copy_visu(p_dir_seg, p_dir_visu)
logger.info('DONE')
# plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logger.info('running...')
if __name__ == '__main__':
main()
logger.info('DONE')
# plt.show()
......@@ -7,11 +7,10 @@ Example run:
-out /datagrid/Medical/microscopy/drosophila/TEMPORARY/experiments_APD_new \
--dataset type_1_segm_reg_binary --sub_dataset gene_ssmall
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
# to suppress all visualisations, this has to be at the beginning
import matplotlib
matplotlib.use('Agg')
import os
import time
......@@ -19,11 +18,13 @@ import traceback
import logging
import copy
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import generate_dataset as gen_data
import dataset_utils as gen_data
import dictionary_learning as dl
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weigth
......@@ -74,13 +75,14 @@ def experiment_pipeline_alpe_showcase(path_in=experiment_apd.SYNTH_PATH_APD,
path_out=PATH_OUTPUT):
""" an simple show case to prove that the particular steps are computed
:param path_in: str
:param path_out: str
:return:
"""
atlas = gen_data.dataset_create_atlas(path_base=path_in)
atlas = gen_data.dataset_create_atlas(path_in)
# plt.imshow(atlas)
imgs, _ = gen_data.dataset_load_images(path_base=path_in)
imgs, _ = gen_data.dataset_load_images(path_in, gen_data.NAME_DATASET)
# imgs = gen_data.dataset_load_images('datasetBinary_defNoise',
# path_base=SYNTH_PATH_APD)
......@@ -133,10 +135,10 @@ class ExperimentALPE(experiment_apd.ExperimentAPD):
os.mkdir(path_out)
try:
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(self.imgs,
init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
gc_coef=p['gc_regul'], max_iter=p['max_iter'],
ptn_split=p['ptn_split'], w_ovp_m=p['overlap_mj'],
out_dir=path_out) # , out_prefix=prefix
init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
gc_coef=p['gc_regul'], max_iter=p['max_iter'],
ptn_split=p['ptn_split'], overlap_major=p['overlap_mj'],
out_dir=path_out) # , out_prefix=prefix
except:
logger.error('FAILED, no atlas estimated!')
logger.error(traceback.format_exc())
......@@ -271,7 +273,7 @@ def main_real(nb_jobs=NB_THREADS):
# experiments_real(dataset=name, nb_jobs=nb_jobs)
if __name__ == "__main__":
def main():
logging.basicConfig(level=logging.DEBUG)
logger.info('running...')
......@@ -285,3 +287,7 @@ if __name__ == "__main__":
logger.info('DONE')
# plt.show()
if __name__ == "__main__":
main()
\ No newline at end of file
......@@ -4,15 +4,15 @@ run experiments with Stat-of-the-art methods
Example run:
>> nohup python experiments_sta.py > ~/Medical-temp/experiments_APD-sta/nohup.log &
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
# to suppress all visualisations, this has to be at the beginning
# import matplotlib
# matplotlib.use('Agg')
import copy
import logging
# to suppress all visualisations, this has to be at the beginning
import matplotlib
matplotlib.use('Agg')
import numpy as np
from sklearn import decomposition
......@@ -236,18 +236,19 @@ def experiments_synthetic_multi_run(dataset=None,
def main():
""" main_real entry point """
experiments_synthetic_single_run()
# experiments_synthetic_multi_run()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('running...')
experiments_test()
# main_real()
experiments_synthetic_single_run()
# experiments_synthetic_multi_run()
logger.info('DONE')
# plt.show()
if __name__ == "__main__":
main()
"""
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import logging
import dataset_utils as tl_dataset
logger = logging.getLogger(__name__)
b_jirka = False
if b_jirka:
DEFAULT_PATH_DATA = '/jirka/b_jirka/TEMP/'
else:
# PATH_DATA_SYNTH = '/datagrid/Medical/microscopy/drosophila_segmOvary/'
DEFAULT_PATH_DATA = '/datagrid/temporary/Medical/'
DEFAULT_DIR_APD = 'atomicPatternDictionary_vx'
DEFAULT_PATH_APD = os.path.join(DEFAULT_PATH_DATA, DEFAULT_DIR_APD)
NAME_WEIGHTS = 'combination.csv'
DATASET_TYPE = '2D'
def generate_all_2d(path_out=DEFAULT_PATH_APD, csv_name=NAME_WEIGHTS):
""" generate complete dataset containing dictionary od patterns and also
input binary / probab. images with geometrical deformation and random noise
:param csv_name: str
:param path_out: str, path to the results directory
"""
if not os.path.exists(path_out):
os.mkdir(path_out)
# im_dict = dictionary_generate_rnd_pattern()
im_dict = tl_dataset.dictionary_generate_atlas_2d(path_out)
# each dataset variant is a sibling directory under the base path_out
path_sub = os.path.join(path_out, 'datasetBinary_raw')
im_comb, df_comb = tl_dataset.dataset_binary_combine_patterns(im_dict, path_sub)
df_comb.to_csv(os.path.join(path_sub, csv_name))
path_sub = os.path.join(path_out, 'datasetBinary_deform')
im_deform = tl_dataset.dataset_binary_deform_images(im_comb, path_sub)
path_sub = os.path.join(path_out, 'datasetBinary_noise')
tl_dataset.dataset_add_image_noise(im_comb, path_sub,
tl_dataset.add_image_binary_noise, 0.03)
path_sub = os.path.join(path_out, 'datasetBinary_defNoise')
tl_dataset.dataset_add_image_noise(im_deform, path_sub,
tl_dataset.add_image_binary_noise, 0.03)
path_sub = os.path.join(path_out, 'datasetProb_raw')
im_comb_prob = tl_dataset.dataset_prob_construct(im_comb, path_sub)
path_sub = os.path.join(path_out, 'datasetProb_deform')
im_def_prob = tl_dataset.dataset_prob_construct(im_deform, path_sub)
path_sub = os.path.join(path_out, 'datasetProb_noise')
tl_dataset.dataset_add_image_noise(im_comb_prob, path_sub,
tl_dataset.add_image_prob_noise, 0.2)
path_sub = os.path.join(path_out, 'datasetProb_defNoise')
tl_dataset.dataset_add_image_noise(im_def_prob, path_sub,
tl_dataset.add_image_prob_noise, 0.2)
def convert_dataset_nifti(p_datasets=DEFAULT_PATH_APD):
tl_dataset.dataset_convert_nifti(os.path.join(p_datasets, 'datasetBinary_raw'),
os.path.join(p_datasets, 'datasetBinary_raw_nifti'))
def main():
logging.basicConfig(level=logging.DEBUG)
logger.info('running...')
# test_Ellipse()
if DATASET_TYPE == '2D':
generate_all_2d()
elif DATASET_TYPE == '3D':
# TODO
pass
# convert_dataset_nifti()
logger.info('DONE')
if __name__ == "__main__":
main()
\ No newline at end of file
......@@ -2,23 +2,26 @@
This script parses the csv with encoding and extends it by information
from the general drosophila information file
Second, it computes the mean activation over aggregated gene ids
Copyright (C) 2015-2016 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import sys
import glob
import json
import gc
import time
import logging
import multiprocessing as mproc
from functools import partial
import numpy as np
import pandas as pd
sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root
import src.segmentation.tool_superpixels as tl_spx
import src.atm_ptn_dict.generate_dataset as gen_data
import src.atm_ptn_dict.dataset_utils as gen_data
import src.atm_ptn_dict.pattern_weights as ptn_weight
import src.atm_ptn_dict.run_apd_reconstruction as r_reconst
......@@ -96,7 +99,7 @@ def recompute_encoding(atlas, path_csv):
path_expt = os.path.dirname(path_csv)
config = load_config_json(path_expt)
path_in = os.path.join(config.get('path_in'), config.get('dataset'))
imgs, im_names = gen_data.dataset_load_images(config.get('sub_dataset'), path_in)
imgs, im_names = gen_data.dataset_load_images(path_in, config.get('sub_dataset'))
weights = [ptn_weight.weights_image_atlas_overlap_major(img, atlas) for img in imgs]
df = pd.DataFrame(data=np.array(weights), index=im_names)
df.columns = ['ptn {}'.format(lb + 1) for lb in df.columns]
......@@ -134,12 +137,11 @@ def main(path_csv_main=PATH_CSV_MAIN, path_experiemnts=PATH_EXPERIMENTS):
list_csv = [p for p in glob.glob(os.path.join(path_dir, 'encoding_*.csv'))
if not p.endswith('_gene.csv')]
mp_tuples = zip(list_csv, [df_main] * len(list_csv))
if RUN_DEBUG:
map(mproc_wrapper, mp_tuples)
map(partial(process_experiment, df_main=df_main), list_csv)
else:
mproc_pool = mproc.Pool(len(mp_tuples))
mproc_pool.map(mproc_wrapper, mp_tuples)
mproc_pool = mproc.Pool(len(list_csv))
mproc_pool.map(partial(process_experiment, df_main=df_main), list_csv)
mproc_pool.close()
mproc_pool.join()
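The same map-with-a-fixed-argument pattern in miniature (a standalone sketch; partial keeps the pool-side callable picklable because it wraps a module-level function):

from functools import partial
import multiprocessing as mproc

def process(path_csv, df_main):
    return '%s: %i rows' % (path_csv, len(df_main))

if __name__ == '__main__':
    paths = ['encoding_1.csv', 'encoding_2.csv']
    pool = mproc.Pool(2)
    print(pool.map(partial(process, df_main=[1, 2, 3]), paths))
    pool.close()
    pool.join()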
......