Commit 781b7187 by Jiri Borovec

update

parent d011027e
@@ -14,7 +14,7 @@ all data are stored on standard paths within the university datagrid
## Synthetic datasets
all experiment ar located in **experiments_alpe.py**
all experiments are located in **experiments_alpe.py**
1. simple check whether the unary cost is computed correctly on 3 simple images
2. performing experiments on the clean synthetic dataset
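To run them, presumably execute the script directly (a guess based on its `__main__` block, which calls `experiments_test()`): `python experiments_alpe.py`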
@@ -14,8 +14,7 @@ import os
import time
import traceback
import logging
import copy_reg
import types
import numpy as np
import matplotlib.pylab as plt
@@ -26,8 +25,12 @@ import dictionary_learning as dl
import ptn_disctionary as ptn_dict
import ptn_weights as ptn_weigth
import expt_apd_sta as exp_sta
logger = logging.getLogger(__name__)
NB_THREADS = exp_sta.NB_THREADS
PATH_OUTPUT = exp_sta.PATH_OUTPUT
# REQUIRED FOR MPROC POOL
# ISSUE: cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
# http://stackoverflow.com/questions/25156768/cant-pickle-type-instancemethod-using-pythons-multiprocessing-pool-apply-a
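# A minimal sketch of the workaround from the linked answer (an assumption:
# this is presumably what the `copy_reg` and `types` imports served); it
# registers a reducer so bound methods become picklable for mproc pools:
#   def _reduce_method(m):
#       if m.im_self is None:  # unbound method -> rebuild from the class
#           return getattr, (m.im_class, m.im_func.func_name)
#       return getattr, (m.im_self, m.im_func.func_name)
#   copy_reg.pickle(types.MethodType, _reduce_method)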
@@ -42,9 +45,7 @@ logger = logging.getLogger(__name__)
def test_simple_show_case():
"""
:return:
"""
""" """
# implement simple case just with 2 images and 2/3 classes in atlas
atlas = gen_data.get_simple_atlas()
# atlas2 = atlas.copy()
@@ -64,7 +65,7 @@ def test_simple_show_case():
plt.imshow(img, cmap='gray', interpolation='nearest')
t = time.time()
uc = dl.compute_relative_penaly_images_weights(imgs, np.array(ws))
logger.debug('elapsed TIME: {}'.format(time.time() - t))
logger.debug('elapsed TIME: %s', repr(time.time() - t))
res = dl.estimate_atlas_graphcut_general(imgs, np.array(ws), 0.)
plt.subplot(gs[0, -1]), plt.title('result')
plt.imshow(res, cmap=cm, interpolation='nearest'), plt.colorbar()
@@ -75,13 +76,12 @@ def test_simple_show_case():
plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.title('cost lb #{}'.format(i)), plt.colorbar()
# logger.debug(uc)
return
def experiment_pipeline_alpe_showcase(p_out=exp_sta.DEFAULT_PATH_OUTPUT):
def experiment_pipeline_alpe_showcase(path_out=PATH_OUTPUT):
""" an simple show case to prove that the particular steps are computed
:param p_out: str
:param path_out: str
:return:
"""
atlas = gen_data.dataset_create_atlas(path_base=exp_sta.SYNTH_PATH_APD)
@@ -97,55 +97,10 @@ def experiment_pipeline_alpe_showcase(p_out=exp_sta.DEFAULT_PATH_OUTPUT):
init_encode_rnd = ptn_weigth.initialise_weights_random(len(imgs), np.max(atlas))
atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(imgs, out_prefix='mosaic',
init_atlas=init_atlas_msc, max_iter=9, out_dir=p_out)
init_atlas=init_atlas_msc, max_iter=9, out_dir=path_out)
return atlas, w_bins
# class ALPE(object):
#
# def __init__(self, params):
# self._params = copy.deepcopy(params)
#
# def _init_atlas(self, imgs):
# """ init atlas according an param
#
# :return: np.array<w, h>
# """
# im_size = imgs[0].shape
# nb_lbs = self._params['nb_lbs']
# if self._params['init_tp'] == 'msc':
# init_atlas = ptn_dict.initialise_atlas_mosaic(im_size, nb_lbs)
# else:
# init_atlas = ptn_dict.initialise_atlas_random(im_size, nb_lbs)
# return init_atlas
#
# def estimate_atlas(self, i, imgs):
# """ set all params and run the atlas estimation in try mode
#
# :param i: int, index of try
# :param init_atlas: np.array<w, h>
# :return:
# """
# init_atlas = self._init_atlas(imgs)
# p = self._params
# # prefix = 'expt_{}'.format(p['init_tp'])
# p_out = os.path.join(p['exp_path'], 'case_{:05d}'.format(i))
# if not os.path.exists(p_out):
# os.mkdir(p_out)
# try:
# atlas, w_bins = dl.alpe_pipe_atlas_learning_ptn_weights(imgs,
# init_atlas=init_atlas, gc_reinit=p['gc_reinit'],
# gc_coef=p['gc_regul'], max_iter=p['max_iter'],
# ptn_split=p['ptn_split'], w_ovp_m=p['overlap_mj'],
# out_dir=p_out) # , out_prefix=prefix
# except:
# logger.error('NO atlas estimated!')
# logger.error(traceback.format_exc())
# atlas = np.zeros_like(imgs[0])
# w_bins = np.zeros((len(imgs), 1))
# return atlas, w_bins
class ExperimentALPE(exp_sta.ExperimentAPD):
"""
the main experiment of our Atlas Learning Pattern Encoding
@@ -209,82 +164,96 @@ class ExperimentALPE(exp_sta.ExperimentAPD):
class ExperimentALPE_mp(ExperimentALPE, exp_sta.ExperimentAPD_mp):
"""
parrallel version of ALPE
parallel version of ALPE
"""
pass
def experiments_test():
""" simple test of the experiments
:return:
"""
""" simple test of the experiments """
# experiment_pipeline_alpe_showcase()
params = exp_sta.SYNTH_PARAMS.copy()
params['nb_runs'] = 3
logger.info('RUN: ExperimentALPE')
expt = ExperimentALPE(params)
expt.run(it_var='case', it_vals=range(params['nb_runs']))
expt.run(iter_var='case', iter_vals=range(params['nb_runs']))
logger.info('RUN: ExperimentALPE_mp')
expt_p = ExperimentALPE_mp(params)
expt_p.run(it_var='case', it_vals=range(params['nb_runs']))
return
expt_p.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_synthetic(dataset=None, nb_jobs=exp_sta.DEFAULT_NB_THREADS):
def experiments_synthetic(dataset=None, nb_jobs=NB_THREADS):
""" run all experiments
:return:
:param dataset: str
:param nb_jobs: int
"""
logging.basicConfig(level=logging.INFO)
params = exp_sta.SYNTH_PARAMS.copy()
if isinstance(dataset, str):
params.update({'dataset': dataset})
l_params = [params]
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset', exp_sta.SYNTH_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset',
exp_sta.SYNTH_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = exp_sta.extend_l_params(l_params, 'ptn_split', [True, False])
range_nb_lbs = exp_sta.SYNTH_PTN_RANGE[exp_sta.SYNTH_DATASET_VERSION]
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs', range_nb_lbs)
l_params = exp_sta.extend_l_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
logger.debug('list params: {}'.format(len(l_params)))
logger.debug('list params: %i', len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, exp_sta.DEFAULT_NB_THREADS)
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(it_var='case', it_vals=range(params['nb_runs']))
return
exp.run(iter_var='case', iter_vals=range(params['nb_runs']))
def experiments_real(dataset=None, nb_jobs=exp_sta.DEFAULT_NB_THREADS):
def experiments_real(dataset=None, nb_jobs=NB_THREADS):
""" run all experiments
:return:
:param dataset: str
:param nb_jobs: int
"""
logging.basicConfig(level=logging.INFO)
params = exp_sta.REAL_PARAMS.copy()
if type(dataset) is str:
if isinstance(dataset, str):
params.update({'dataset': dataset})
l_params = [params]
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset', exp_sta.REAL_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'sub_dataset',
exp_sta.REAL_SUB_DATASETS)
l_params = exp_sta.extend_l_params(l_params, 'init_tp', ['msc', 'rnd'])
l_params = exp_sta.extend_l_params(l_params, 'ptn_split', [True, False])
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs', range(5, 12, 2) + range(15, 35, 4))
l_params = exp_sta.extend_l_params(l_params, 'nb_lbs',
range(5, 12, 3) + range(12, 35, 5))
l_params = exp_sta.extend_l_params(l_params, 'gc_regul', [0., 1e-3, 1e-1, 1e0])
logger.debug('list params: {}'.format(len(l_params)))
logger.debug('list params: %i', len(l_params))
for params in l_params:
if nb_jobs > 1:
exp = ExperimentALPE_mp(params, exp_sta.DEFAULT_NB_THREADS)
exp = ExperimentALPE_mp(params, nb_jobs)
else:
exp = ExperimentALPE(params)
exp.run(gt=False, it_var='case', it_vals=range(params['nb_runs']))
return
exp.run(gt=False, iter_var='case', iter_vals=range(params['nb_runs']))
def main():
# experiments_synthetic()
# experiments_real(nb_jobs=1)
datasets = ['type_{}_segm_reg_binary'.format(i) for i in range(1, 5)]
for name in datasets:
experiments_real(name, nb_jobs=1)
if __name__ == "__main__":
@@ -293,12 +262,9 @@ if __name__ == "__main__":
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
# experiments_test()
experiments_synthetic()
experiments_test()
# experiments_real(nb_jobs=1)
# experiments_real('1000_imgs_binary')
# main()
logger.info('DONE')
plt.show()
\ No newline at end of file
@@ -15,7 +15,7 @@ def initialise_atlas_random(im_size, max_lb):
:param max_lb: int, number of labels
:return: np.array<w, h>
"""
logger.debug('initialise atlas {} as random labeling'.format(im_size))
logger.debug('initialise atlas %s as random labeling', repr(im_size))
nb_lbs = max_lb + 1
im = np.random.randint(1, nb_lbs, im_size)
return np.array(im, dtype=np.int)
@@ -29,10 +29,10 @@ def initialise_atlas_mosaic(im_size, max_lb):
:param max_lb: int, number of labels
:return: np.array<w, h>
"""
logger.debug('initialise atlas {} as grid labeling'.format(im_size))
logger.debug('initialise atlas %s as grid labeling', repr(im_size))
nb_lbs = max_lb
block = np.ones(np.ceil(im_size / np.array(nb_lbs, dtype=np.float)))
logger.debug('block size is {}'.format(block.shape))
logger.debug('block size is %s', repr(block.shape))
for l in range(nb_lbs):
idx = np.random.permutation(range(1, nb_lbs + 1))
for k in range(nb_lbs):
@@ -43,8 +43,8 @@ def initialise_atlas_mosaic(im_size, max_lb):
row = np.hstack((row, b))
if l == 0: mosaic = row
else: mosaic = np.vstack((mosaic, row))
logger.debug('generated mosaic {} with labeling'.format(mosaic.shape,
np.unique(mosaic).tolist()))
logger.debug('generated mosaic %s with labeling %s',
repr(mosaic.shape), repr(np.unique(mosaic).tolist()))
im = mosaic[:im_size[0], :im_size[1]]
return np.array(im, dtype=np.int)
@@ -130,7 +130,7 @@ def insert_new_pattern(imgs, imgs_rc, atlas, lb):
# logger.debug('new im_ptn: {}'.format(np.sum(im_ptn) / np.prod(im_ptn.shape)))
# plt.imshow(im_ptn), plt.title('im_ptn'), plt.show()
atlas[im_ptn == True] = lb
logger.debug('area of new pattern is {}'.format(np.sum(atlas == lb)))
logger.debug('area of new pattern is %i', np.sum(atlas == lb))
return atlas
@@ -146,27 +146,27 @@ def reinit_atlas_likely_patterns(imgs, w_bins, atlas, lb_max=None):
if lb_max is None:
lb_max = max(np.max(atlas), w_bins.shape[1])
else:
logger.debug('compare w_bin {} to max {}'.format(w_bins.shape, lb_max))
logger.debug('compare w_bin %s to max %i', repr(w_bins.shape), lb_max)
for i in range(w_bins.shape[1], lb_max):
logger.debug('adding disappeared weigh column {}'.format(i))
logger.debug('adding disappeared weight column %i', i)
w_bins = np.append(w_bins, np.zeros((w_bins.shape[0], 1)), axis=1)
w_bin_ext = np.append(np.zeros((w_bins.shape[0], 1)), w_bins, axis=1)
logger.debug('IN > sum over weights: {}'.format(np.sum(w_bin_ext, axis=0)))
logger.debug('IN > sum over weights: %s', repr(np.sum(w_bin_ext, axis=0)))
# shift labels by one since index 0 is the background
logger.debug('total nb labels: {}'.format(lb_max))
for l in range(1, lb_max + 1):
l_w = l - 1
w_sum = np.sum(w_bins[:, l_w])
logger.debug('reinit lb: {} with weight sum {}'.format(l, w_sum))
logger.debug('total nb labels: %i', lb_max)
for lb in range(1, lb_max + 1):
lb_w = lb - 1
w_sum = np.sum(w_bins[:, lb_w])
logger.debug('reinit lb: %i with weight sum %i', lb, w_sum)
if w_sum > 0:
continue
imgs_rc = reconstruct_samples(atlas, w_bins)
atlas = insert_new_pattern(imgs, imgs_rc, atlas, l)
logger.debug('w_bins before: {}'.format(np.sum(w_bins[:, l_w])))
atlas = insert_new_pattern(imgs, imgs_rc, atlas, lb)
logger.debug('w_bins before: %i', np.sum(w_bins[:, lb_w]))
lim_repopulate = 100. / np.prod(atlas.shape)
w_bins[:, l_w] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
atlas, l, lim_repopulate)
logger.debug('w_bins after: {}'.format(np.sum(w_bins[:, l_w])))
w_bins[:, lb_w] = ptn_weight.weights_label_atlas_overlap_threshold(imgs,
atlas, lb, lim_repopulate)
logger.debug('w_bins after: %i', np.sum(w_bins[:, lb_w]))
return atlas, w_bins
@@ -178,31 +178,25 @@ def atlas_split_indep_ptn(atlas, lb_max):
:return:
"""
l_ptns = []
for l in np.unique(atlas):
labeled, nb_objects = ndimage.label(atlas == l)
logger.debug('for lb {} detected #{}'.format(l, nb_objects))
for lb in np.unique(atlas):
labeled, nb_objects = ndimage.label(atlas == lb)
logger.debug('for label %i detected #%i', lb, nb_objects)
ptn = [(labeled == j) for j in np.unique(labeled)]
# skip the largest one, assuming it is the background
l_ptns += sorted(ptn, key=lambda x: np.sum(x), reverse=True)[1:]
l_ptns = sorted(l_ptns, key=lambda x: np.sum(x), reverse=True)
logger.debug('list of all areas {}'.format([np.sum(p) for p in l_ptns]))
logger.debug('list of all areas %s', repr([np.sum(p) for p in l_ptns]))
atlas_new = np.zeros(atlas.shape, dtype=np.int)
# take just lb_max largest elements
for i, ptn in enumerate(l_ptns[:lb_max]):
l = i + 1
logger.debug('pattern #{} area {}'.format(l, np.sum(ptn)))
lb = i + 1
logger.debug('pattern #%i area %i', lb, np.sum(ptn))
# plt.subplot(1,lb_max,l), plt.imshow(ptn), plt.colorbar()
atlas_new[ptn] = l
atlas_new[ptn] = lb
# plt.figure()
# plt.subplot(121), plt.imshow(atlas), plt.colorbar()
# plt.subplot(122), plt.imshow(atlas_new), plt.colorbar()
# plt.show()
logger.debug('atlas unique {}'.format(np.unique(atlas_new)))
logger.debug('atlas unique %s', repr(np.unique(atlas_new)))
return atlas_new
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
@@ -13,8 +13,8 @@ def initialise_weights_random(nb_imgs, nb_lbs, ratio_sel=0.2):
1 means all and 0 means none
:return: np.array<nb_imgs, nb_lbs>
"""
logger.debug('initialise weights for {} images and {} labels '
'as random selection'.format(nb_imgs, nb_lbs))
logger.debug('initialise weights for %i images and %i labels '
'as random selection', nb_imgs, nb_lbs)
prob = np.random.random((nb_imgs, nb_lbs))
weights = np.zeros_like(prob)
weights[prob <= ratio_sel] = 1
@@ -27,8 +27,8 @@ def convert_weights_binary2indexes(weights):
:param weights: np.array<nb_imgs, nb_lbs>
:return: [[int, ...]] * nb_imgs
"""
logger.debug('convert binary weights {} '
'to list of indexes with True'.format(weights.shape))
logger.debug('convert binary weights %s to list of indexes with True',
repr(weights.shape))
# if type(weights)==np.ndarray: weights = weights.tolist()
w_idx = [None] * weights.shape[0]
for i in range(weights.shape[0]):
@@ -108,8 +108,3 @@ def weights_label_atlas_overlap_threshold(imgs, atlas, lb, thr=1e-3):
weight[i] = 1
return np.array(weight)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
@@ -8,31 +8,39 @@ and then decide whether the segmentation is likely to be correct or not
import os
import glob
import logging
import multiprocessing as mproc
import itertools
import shutil
import multiprocessing as mproc
# to suppress all visualisations, this has to be set at the very beginning
import matplotlib
matplotlib.use('Agg')
import numpy as np
from skimage import io
from skimage import io, morphology
import matplotlib.pyplot as plt
import pandas as pd
logger = logging.getLogger(__name__)
PATH_BASE = '/datagrid/Medical/microscopy/drosophila/'
# PATH_SEGM = os.path.join(PATH_BASE, 'TEMPORARY/orig_segm')
PATH_SEGM = os.path.join(PATH_BASE, 'real_segmentations/orig_segm')
PATH_SEGM = os.path.join(PATH_BASE, 'RESULTS/orig_segm')
PATH_VISU = os.path.join(PATH_BASE, 'TEMPORARY/orig_visu')
# PATH_SEGM = os.path.join(PATH_BASE, 'real_segmentations/stage_4_segm')
# PATH_VISU = os.path.join(PATH_BASE, 'real_segmentations/stage_4_visu')
NB_JOBS = mproc.cpu_count()
CSV_SEGM_GOOD = 'segm_good.csv'
CSV_SEGM_FAIL = 'segm_fail.csv'
FIG_STAT = 'stat_segm_labels.jpeg'
PREFIX_VISU_SEGM = 'visu_segm_'
# margin size (in pixels) around the image borders
NB_IMG_CORNER = 50
# ratio of background required around the borders
THR_CORNER_BG = 0.95
# ratio for the total number of background pixels
THT_BACKGROUND = 0.95
# ratio of the object (foreground) filling its convex hull
THT_CONVEX = 0.85
def labels_ration(p_seg):
@@ -51,9 +59,13 @@ def labels_ration(p_seg):
seg[:, :NB_IMG_CORNER].ravel(),
seg[:, -NB_IMG_CORNER:].ravel()))
r_bg = np.sum(seg_border == 0) / float(seg_border.shape[0])
# close the foreground mask to fill small holes before the hull check
seg_fg = morphology.binary_closing(seg > 0, morphology.disk(30))
obj_convex = morphology.convex_hull_object(seg_fg)
# fraction of the convex hull area actually covered by the object
obj_bg = np.sum(seg_fg[obj_convex] > 0) / float(np.sum(obj_convex))
return {'name': n_seg,
'lb_hist': d_lb_hist,
'r_bg': r_bg}
'r_bg': r_bg,
'r_cx': obj_bg}
def plot_histo_labels(d_hist, p_dir=''):
@@ -61,7 +73,7 @@ def plot_histo_labels(d_hist, p_dir=''):
:param d_hist: {int: float}
"""
logger.debug('plotting results')
logger.info('plotting stat. results')
fig = plt.figure()
for lb in d_hist:
plt.plot(d_hist[lb], '+', label=str(lb))
@@ -81,9 +93,8 @@ def read_make_hist(p_segs):
:return: [str, {int: float}] list of pairs with image name
and relative label histogram
"""
nb_jobs = mproc.cpu_count()
logger.debug('run in %i threads...', nb_jobs)
mproc_pool = mproc.Pool(nb_jobs)
logger.debug('run in %i threads...', NB_JOBS)
mproc_pool = mproc.Pool(NB_JOBS)
l_desc = mproc_pool.map(labels_ration, p_segs)
mproc_pool.close()
mproc_pool.join()
@@ -100,7 +111,7 @@ def merge_hist_stat(l_n_hist):
logger.debug('merge partial results...')
l_hist = [l['lb_hist'] for l in l_n_hist]
lbs = itertools.chain(*[h.keys() for h in l_hist])
u_lbs = np.unique(lbs).tolist()
u_lbs = np.unique(list(lbs)).tolist()
d_hist = {lb: [h[lb] for h in l_hist if lb in h]
for lb in u_lbs}
# join the foregrounds
@@ -109,6 +120,9 @@ def merge_hist_stat(l_n_hist):
d_hist['fg'].append(hist.get(1, 0) + hist.get(2, 0))
logger.debug('compute statistic...')
for lb, vals in d_hist.iteritems():
if len(vals) == 0:
logger.warning('label %s has no values to compute', str(lb))
continue
logger.info('label %s with mean %f, median %f, std %f',
str(lb), np.mean(vals), np.median(vals), np.std(vals))
logger.debug(' -> count outliers: %i',
@@ -129,7 +143,10 @@ def segm_decision(l_desc, d_hist):
for i, desc in enumerate(l_desc):
fg = desc['lb_hist'].get(1, 0) + desc['lb_hist'].get(2, 0)
b_range = abs(fg - fg_median) <= 3 * fg_std
if desc['lb_hist'][0] < 0.85 and b_range and desc['r_bg'] > 0.95:
if b_range \
and desc['lb_hist'][0] < THT_BACKGROUND \
and desc['r_bg'] > THR_CORNER_BG \
and desc['r_cx'] > THT_CONVEX:
l_good.append(desc['name'])
else:
l_fail.append(desc['name'])
@@ -144,10 +161,6 @@ def export_results(p_dir, l_good, l_fail):
:param l_fail: [str], names of images
"""
logger.info('export results as CSV files')
# with open(os.path.join(PATH_SEGM, 'segm_good.csv'), 'w') as f:
# f.writelines(['images'] + l_good)
# with open(os.path.join(PATH_SEGM, 'segm_fail.csv'), 'w') as f:
# f.writelines(['images'] + l_fail)
pd.DataFrame(['images'] + l_good).to_csv(
os.path.join(p_dir, CSV_SEGM_GOOD), index=False, header=False)
pd.DataFrame(['images'] + l_fail).to_csv(
@@ -180,6 +193,27 @@ def segm_detect_fails(p_dir=PATH_SEGM, im_ptn='*.png'):
plot_histo_labels(d_hist, p_dir)
def mproc_copy_file(mp_set):
shutil.copyfile(*mp_set)  # unpack the (source, destination) pair for Pool.map
def copy_files(l_imgs, p_dir_visu, p_out):
""" copy list of images in multi thread
:param l_imgs: [str]
:param p_dir_visu: str
:param p_out: str
"""
pp_dir_visu = os.path.join(p_dir_visu, PREFIX_VISU_SEGM)
pp_out = os.path.join(p_out, PREFIX_VISU_SEGM)
mp_set = [(pp_dir_visu + n_img, pp_out + n_img) for n_img in l_imgs]
mproc_pool = mproc.Pool(NB_JOBS)
mproc_pool.map(mproc_copy_file, mp_set)
mproc_pool.close()
mproc_pool.join()
def filter_copy_visu(p_dir_seg=PATH_SEGM, p_dir_visu=PATH_VISU):
""" load csv file vith good and bad segmentation and in the visual folder
create subfolder for good and bad segm and copy relevant iimages there
@@ -187,19 +221,19 @@ def filter_copy_visu(p_dir_seg=PATH_SEGM, p_dir_visu=PATH_VISU):
:param p_dir_seg: str
:param p_dir_visu: str
"""
logger.debug('segmentation: %s, visual: %s', p_dir_seg, p_dir_visu)
logger.info('filter and copy cases')
logger.debug('segmentation: %s,\n visual: %s', p_dir_seg, p_dir_visu)
for n_csv in [CSV_SEGM_GOOD, CSV_SEGM_FAIL]:
logger.debug('reading "%s"', n_csv)
logger.info('reading "%s"', n_csv)
p_out = os.path.join(p_dir_visu, os.path.splitext(n_csv)[0])
shutil.rmtree(p_out)
if not os.path.exists(p_out):
os.mkdir(p_out)
if os.path.exists(p_out):
logger.debug('remove old dir %s', p_out)
shutil.rmtree(p_out)
os.mkdir(p_out)
df = pd.DataFrame.from_csv(os.path.join(p_dir_seg, n_csv),
index_col=False)
logger.debug('copy %i images to "%s"', len(df), n_csv)
for n_img in df['images'].values.tolist():
shutil.copyfile(os.path.join(p_dir_visu, 'visu_' + n_img),
os.path.join(p_out, 'visu_' + n_img))
logger.info('copy %i images to "%s"', len(df), n_csv)
copy_files(df['images'].values.tolist(), p_dir_visu, p_out)
def main():
@@ -209,10 +243,8 @@ def main():
filter_copy_visu()
for idx in range(1, 5):
p_dir_seg = os.path.join(PATH_BASE, 'real_segmentations/'
'stage_{}_segm'.format(idx))
p_dir_visu = os.path.join(PATH_BASE, 'TEMPORARY/'
'stage_{}_visu'.format(idx))
p_dir_seg = os.path.join(PATH_BASE, 'RESULTS/type_{}_segm'.format(idx))
p_dir_visu = os.path.join(PATH_BASE, 'TEMPORARY/type_{}_visu'.format(idx))
segm_detect_fails(p_dir_seg)
filter_copy_visu(p_dir_seg, p_dir_visu)
@@ -224,4 +256,4 @@ if __name__ == '__main__':
main()
logger.info('DONE')
plt.show()
\ No newline at end of file
# plt.show()
@@ -18,26 +18,24 @@ from skimage import morphology
import logging
logger = logging.getLogger(__name__)
jirka = False
if jirka:
DEFAULT_PATH_DATA = '/jirka/jirka/TEMP/APD_real_data'
else:
DEFAULT_PATH_DATA = '/datagrid/Medical/microscopy/drosophila/real_segmentations'
# REAL_DATASET_NAME = '108_genes_expression'
# DEFAULT_PATH_DATA = '/jirka/jirka/TEMP/APD_real_data'
DEFAULT_PATH_DATA = '/datagrid/Medical/microscopy/drosophila/'
# REAL_DATASET_NAME = '1000_ims'
# IMAGE_PATTERN = '*_exp'
# REAL_DATASET_NAME = '1000_images_improved'
# IMAGE_PATTERN = '*_seg_de'
REAL_DATASET_NAME = '1000_images_improved'
IMAGE_PATTERN = '*_seg_de'
REAL_DATASET_NAME = 'type_1_segm_reg'
IMAGE_PATTERN = '*'
DEFAULT_PARAMS = {
'computer': os.uname(),
'in_path': DEFAULT_PATH_DATA,
# 'computer': os.uname(),
'in_path': os.path.join(DEFAULT_PATH_DATA, 'RESULTS'),
'dataset': REAL_DATASET_NAME,
'out_path': DEFAULT_PATH_DATA,
'out_path': os.path.join(DEFAULT_PATH_DATA, 'TEMPORARY'),
# 'binary': ['fix', 'otsu', 'adapt'],
'binary': ['3cls'],
}
BINARY_POSIX = '_binary'
NB_THREADS = int(mproc.cpu_count() * 0.7)
@@ -132,6 +130,16 @@ def threshold_image_adapt(img):
return img_th
def threshold_image_3cls_gene(img):
# assuming a 3-class segmentation scaled to [0, 1]: keep only the top class
img_th = img >= (2 / 3.)
return img_th
def threshold_image_3cls_disc(img):
# assuming the same encoding: keep both non-background classes
img_th = img >= (1 / 3.)
return img_th
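# A sketch of how these thresholds could plug into `binarise_main` below; an
# assumption, since the '3cls' branch (enabled via DEFAULT_PARAMS['binary'])
# lies outside the shown hunks and presumably mirrors the other branches:
#   if '3cls' in params['binary']:
#       imgs_th = threshold_images(imgs, threshold_image_3cls_gene)
#       p_out = os.path.join(p_export, 'binary-3cls-gene')
#       gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)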
def threshold_images(imgs, fn_th, nb_jobs=NB_THREADS):
""" threshold images by specific level
@@ -154,37 +162,51 @@ def binarise_main(params=DEFAULT_PARAMS):
:return:
"""
imgs, names = gen_data.dataset_load_images(params['dataset'], params['in_path'],
im_ptn=IMAGE_PATTERN)
im_ptn=IMAGE_PATTERN, nb_jobs=NB_THREADS)
logger.debug('loaded {} images of size {}'.format(len(imgs), imgs[0].shape))
imgs = extend_images(imgs)
imgs = crop_images(imgs, 1e-3)
gc.collect(), time.sleep(1)
p_export = os.path.join(params['in_path'], params['dataset'] + BINARY_POSIX)
p_export = os.path.join(params['out_path'], params['dataset'] + BINARY_POSIX)
if not os.path.exists(p_export):
os.mkdir(p_export)
imgs_th = threshold_images(imgs, threshold_image_fix)
p_out = os.path.join(p_export, 'binary-fix_{}'.format(IMAGE_BINARY_THRESHOLD))
gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)
gc.collect(), time.sleep(1)
if 'fix' in params['binary']:
imgs_th = threshold_images(imgs, threshold_image_fix)
p_out = os.path.join(p_export, 'binary-fix_{}'.format(IMAGE_BINARY_THRESHOLD))
gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)
gc.collect(), time.sleep(1)
imgs_th = threshold_images(imgs, threshold_image_otsu)
p_out = os.path.join(p_export, 'binary-otsu')
gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)
gc.collect(), time.sleep(1)
if 'otsu' in params['binary']:
imgs_th = threshold_images(imgs, threshold_image_otsu)
p_out = os.path.join(p_export, 'binary-otsu')
gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)
gc.collect(), time.sleep(1)
if 'adapt' in params['binary']:
imgs_th = threshold_images(imgs, threshold_image_adapt)
p_out = os.path.join(p_export, 'binary-adapt')
gen_data.dataset_export_images(p_out, imgs_th, names, nb_jobs=NB_THREADS)