Commit 4637da03 by Jiri Borovec

reconstruction

parent 3798ddc8
"""
script that takes the csv files with the encoding by the proposed atlas
and does the back reconstruction of each image. As a sub-step it computes
the reconstruction error to evaluate the parameters and exports visualisation
"""
import os
import glob
import json
import logging
import traceback
import multiprocessing as mproc
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
# root of the dataset on the shared storage
PATH_BASE = '/datagrid/Medical/microscopy/drosophila/'
PATH_EXPERIMENTS = os.path.join(PATH_BASE, 'TEMPORARY', 'experiments_APD_new')
# default experiment folder used when this script is run stand-alone
NAME_EXPERIMENT = 'ExperimentALPE_mp_real_type_1_segm_reg_binary_gene_ssmall'
SAMPLE_PATH_EXPERIMENT = os.path.join(PATH_EXPERIMENTS, NAME_EXPERIMENT)
NAME_CONFIG = 'config.json'
# file-name prefixes used to pair atlases with their encoding CSVs
PREFIX_ATLAS = 'atlas_'
PREFIX_ENCODE = 'encoding_'
PREFIX_RECONST = 'reconstruct_'
# use 80% of available cores for the worker pool
NB_THREADS = int(mproc.cpu_count() * .8)
# export a side-by-side visualisation for every reconstructed image
VISUAL = True
def draw_reconstruction(path_out, name, segm_orig, segm_rect):
    """ visualise the reconstruction next to the original segmentation
    and save the figure as '<name>.png' into `path_out`

    :param path_out: str, output directory for the exported figure
    :param name: str, image name used as the figure file name
    :param segm_orig: np.array<height, width>, original binary segmentation
    :param segm_rect: np.array<height, width>, reconstructed label image
    """
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(24, 12))
    ax[0].set_title('original segmentation')
    # invert so the foreground is dark on a white background
    ax[0].imshow(1 - segm_orig, cmap='Greys'), ax[0].axis('off')
    # NOTE: contour takes the plural `linewidths`; the original `linewidth`
    # kwarg is not a valid contour parameter
    ax[0].contour(segm_rect, linewidths=2, cmap=plt.cm.jet)
    ax[1].set_title('reconstructed segmentation')
    ax[1].imshow(segm_rect), ax[1].axis('off')
    ax[1].contour(segm_orig, linewidths=2)
    p_fig = os.path.join(path_out, name + '.png')
    fig.savefig(p_fig, bbox_inches='tight')
    plt.close(fig)
def compute_reconstruction(dict_params, path_out, img_atlas, weights, img_name):
    """ reconstruct the segmentation from atlas and particular weights
    and compute the reconstruction error

    :param dict_params: {str: value}, experiment configuration
    :param path_out: str, directory where the visualisation is exported
    :param img_atlas: np.array<height, width>, atlas with integer labels
    :param weights: [<0,1>], binary weights, one per atlas pattern
    :param img_name: str, name of the image / segmentation to load
    :return: (str, float), image name and relative reconstruction error
    """
    segm_orig = load_segmentation(dict_params, img_name)
    # activate every atlas pattern whose weight is 1 (label = index + 1,
    # since label 0 is the background)
    segm_rect = np.zeros_like(img_atlas)
    for i, v in enumerate(weights):
        lb = i + 1
        if v == 1:
            segm_rect[img_atlas == lb] = lb
    logger.debug('segm unique: %s', repr(np.unique(segm_rect)))
    if VISUAL:
        try:
            draw_reconstruction(path_out, img_name, segm_orig, segm_rect)
        except Exception:
            # visualisation is best-effort; never let a drawing failure
            # break the reconstruction (but do not swallow SystemExit etc.)
            logger.error(traceback.format_exc())
    # fraction of pixels where the binarised reconstruction disagrees
    segm_bin = (segm_rect >= 1)
    diff = np.sum(segm_orig != segm_bin) / float(np.prod(segm_orig.shape))
    return img_name, diff
def find_relevant_atlas(name_csv, list_names_atlas):
    """ find match of encode among all possible atlases

    :param name_csv: str, encoding CSV file name
    :param list_names_atlas: [str], candidate atlas file names
    :return: str or None, the matching atlas name if any
    """
    # strip the prefix/suffix so both sides reduce to the bare experiment name
    target = name_csv.replace(PREFIX_ENCODE, '').replace('.csv', '')
    for name_atlas in list_names_atlas:
        base = name_atlas.replace(PREFIX_ATLAS, '').replace('.png', '')
        if base == target:
            return name_atlas
    return None
def load_atlas_image(path_expt, name_atlas):
    """ load the atlas and normalise its labels to small natural ints

    :param path_expt: str, experiment folder
    :param name_atlas: str, atlas file name
    :return: np.array<height, width> with labels [0, 1, 2, ...]
    """
    path_atlas = os.path.join(path_expt, name_atlas)
    img_atlas = np.array(Image.open(path_atlas))
    # norm image to have labels as [0, 1, 2, ...]:
    # map each distinct pixel value onto its rank among the unique values.
    # (the former in-place `img_atlas /= max/nb_labels` raises a TypeError
    # on integer arrays in Python 3 and assumed equally spaced labels)
    _, inverse = np.unique(img_atlas, return_inverse=True)
    img_atlas = inverse.reshape(img_atlas.shape)
    logger.debug('Atlas: %s with labels: %s', repr(img_atlas.shape),
                 repr(np.unique(img_atlas).tolist()))
    return img_atlas
def load_segmentation(dict_params, img_name):
    """ load the segmentation and normalise it to values {0, 1}

    :param dict_params: {str: value}, must contain 'path_in', 'dataset'
        and 'sub_dataset' composing the image path
    :param img_name: str, image name without extension
    :return: np.array<height, width> with values {0, 1}
    """
    path_img = os.path.join(dict_params['path_in'], dict_params['dataset'],
                            dict_params['sub_dataset'], img_name + '.png')
    img = np.array(Image.open(path_img))
    # floor-divide by the maximum so only full-intensity pixels become 1
    # (in-place `img /= img.max()` is true division and raises a TypeError
    # on integer arrays under Python 3)
    img = img // img.max()
    return img
def perform_reconstruction(dict_params, df_encode, img_atlas, path_out=None):
    """ with loaded encoding and atlas does reconstruction for each image

    :param dict_params: {str: value}, experiment configuration
    :param df_encode: DF<images>, encoding table with 'ptn ...' columns
    :param img_atlas: np.array<height, width>
    :param path_out: str, output dir for visualisations; defaults to the
        experiment folder from `dict_params`
    :return: DF<images> with the per-image reconstruction error
    """
    if path_out is None:
        path_out = dict_params['path_exp']
    list_patterns = [col for col in df_encode.columns if col.startswith('ptn ')]
    logger.debug('list of pattern names: %s', repr(list_patterns))
    # walk over images; collect rows first, a single DataFrame construction
    # avoids the deprecated (and quadratic) DataFrame.append in a loop
    list_diffs = []
    for idx, row in df_encode.iterrows():
        weights = row[list_patterns].values
        # the original call dropped `path_out` which made it a TypeError
        name, diff = compute_reconstruction(dict_params, path_out, img_atlas,
                                            weights, row['image'])
        list_diffs.append({'image': name, 'diff': diff})
    df_diff = pd.DataFrame(list_diffs)
    df_diff = df_diff.set_index('image')
    logger.debug(repr(df_diff.describe()))
    return df_diff
def mproc_wrapper(mp_tuple):
    """ unpack the argument tuple for compute_reconstruction;
    Pool.map can only hand each worker a single argument """
    dict_params, path_out, img_atlas, weights, img_name = mp_tuple
    return compute_reconstruction(dict_params, path_out, img_atlas,
                                  weights, img_name)
def perform_reconstruction_mproc(dict_params, name_csv, df_encode, img_atlas):
    """ parallel version of the reconstruction: spread the per-image work
    over a process pool and collect the errors into one DataFrame

    :param dict_params: {str: value}, experiment configuration
    :param name_csv: str, encoding name (without '.csv'), used both for the
        output sub-folder and as the error column name
    :param df_encode: DF<images>, encoding table with 'ptn ...' columns
    :param img_atlas: np.array<height, width>
    :return: DF<images> indexed by image with one error column
    """
    path_out = os.path.join(dict_params['path_exp'],
                            name_csv.replace(PREFIX_ENCODE, PREFIX_RECONST))
    if not os.path.exists(path_out):
        os.mkdir(path_out)
    list_patterns = [col for col in df_encode.columns if col.startswith('ptn ')]
    logger.debug('list of pattern names: %s', repr(list_patterns))
    mp_tuples = ((dict_params, path_out, img_atlas, row[list_patterns].values, idx)
                 for idx, row in df_encode.iterrows())
    mproc_pool = mproc.Pool(NB_THREADS)
    try:
        results = mproc_pool.map(mproc_wrapper, mp_tuples)
    finally:
        # reclaim the workers even when a task raises
        mproc_pool.close()
        mproc_pool.join()
    df_diff = pd.DataFrame(results, columns=['image', name_csv])
    df_diff = df_diff.set_index('image')
    logger.debug(repr(df_diff.describe()))
    return df_diff
def process_experiment(path_expt=SAMPLE_PATH_EXPERIMENT):
    """ process complete folder with experiment: pair every encoding CSV
    with its atlas, reconstruct all images and export the error tables

    :param path_expt: str, experiment folder containing config, atlases
        and encoding CSVs
    """
    logger.info('Experiment folder: \n "%s"', path_expt)
    with open(os.path.join(path_expt, NAME_CONFIG), 'r') as fp:
        dict_params = json.load(fp)
    atlas_names = [os.path.basename(p) for p
                   in glob.glob(os.path.join(path_expt, PREFIX_ATLAS + '*.png'))]
    # skip the derived '_gene' tables, they have no matching atlas
    list_csv = [p for p in glob.glob(os.path.join(path_expt, PREFIX_ENCODE + '*.csv'))
                if not p.endswith('_gene.csv')]
    df_diffs_all = pd.DataFrame()
    for path_csv in list_csv:
        name_csv = os.path.basename(path_csv)
        name_atlas = find_relevant_atlas(name_csv, atlas_names)
        if name_atlas is None:
            # an unmatched encoding would crash load_atlas_image below
            logger.warning('no atlas found for encoding "%s"', name_csv)
            continue
        logger.info('Atlas: "%s" -> Encoding: "%s"', name_atlas, name_csv)
        # load the atlas
        img_atlas = load_atlas_image(path_expt, name_atlas)
        # DataFrame.from_csv was removed from pandas; read_csv with the
        # first column as index is the documented replacement
        df_encode = pd.read_csv(path_csv, index_col=0)
        df_diff = perform_reconstruction_mproc(dict_params, name_csv.replace('.csv', ''),
                                               df_encode, img_atlas)
        df_diffs_all = pd.concat([df_diffs_all, df_diff], axis=1)
    df_diffs_all.to_csv(os.path.join(path_expt, 'reconstruction_diff.csv'))
    if len(df_diffs_all) > 0:
        logger.info(repr(df_diffs_all.describe()))
        with open(os.path.join(path_expt, 'reconstruction_diff.txt'), 'w') as fp:
            fp.write(repr(df_diffs_all.describe()))
    else:
        logger.error('no result parsed!')
def main(path_base=PATH_EXPERIMENTS):
    """ process complete list of experiments found under `path_base`

    :param path_base: str, folder whose sub-directories are experiments
    """
    list_expt = [p for p in glob.glob(os.path.join(path_base, '*'))
                 if os.path.isdir(p)]
    for i, path_expt in enumerate(list_expt):
        # the original format string '%i / %' was malformed and raised
        # a ValueError on the first logged message
        logger.info('processing experiment %i / %i', i + 1, len(list_expt))
        process_experiment(path_expt)
# script entry point: configure logging and run over all experiment folders
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logger.info('running...')
    # process_experiment()
    main()
    logger.info('DONE')
\ No newline at end of file
......@@ -36,11 +36,11 @@ PREFIX_VISU_SEGM = 'visu_segm_'
# size around image borders
NB_IMG_CORNER = 50
# ratio of how much background has to be around the borders
THR_CORNER_BG = 0.95
THRESHOLD_CORNER_BG = 0.95
# ratio for total number of background
THT_BACKGROUND = 0.95
THRESHOLD_BACKGROUND = 0.95
# ratio of background in object convex hull
THT_CONVEX = 0.85
THRESHOLD_CONVEX = 0.85
def labels_ration(path_seg):
......@@ -144,9 +144,9 @@ def segm_decision(l_desc, dict_hist):
fg = desc['lb_hist'].get(1, 0) + desc['lb_hist'].get(2, 0)
b_range = abs(fg - fg_median) <= 3 * fg_std
if b_range \
and desc['lb_hist'][0] < THT_BACKGROUND \
and desc['r_bg'] > THR_CORNER_BG \
and desc['r_cx'] > THT_CONVEX:
and desc['lb_hist'][0] < THRESHOLD_BACKGROUND \
and desc['r_bg'] > THRESHOLD_CORNER_BG \
and desc['r_cx'] > THRESHOLD_CONVEX:
l_good.append(desc['name'])
else:
l_fail.append(desc['name'])
......
......@@ -147,7 +147,7 @@ def visual_pair_orig_segm(d_paths, img, seg, n_img):
# ax[0].imshow(seg_sml), plt.axis('off')
p_fig = os.path.join(d_paths['p_visu'], PREFIX_VISU_SEGM + n_img)
fig.savefig(p_fig, bbox_inches='tight')
plt.close()
plt.close(fig)
def visual_pipeline(d_paths, img, im_norm, seg_raw, seg, n_img):
......@@ -172,7 +172,7 @@ def visual_pipeline(d_paths, img, im_norm, seg_raw, seg, n_img):
# ax[0].imshow(seg_sml), plt.axis('off')
p_fig = os.path.join(d_paths['p_visu'], PREFIX_VISU_PIPELINE + n_img)
fig.savefig(p_fig, bbox_inches='tight')
plt.close()
plt.close(fig)
def draw_img_histogram(ax, img):
......@@ -206,7 +206,7 @@ def visual_pair_orig_norm(d_paths, img, im_norm, n_img):
p_fig = os.path.join(d_paths['p_visu'], PREFIX_VISU_NORM +
n_img.replace('.png', '.pdf'))
fig.savefig(p_fig, bbox_inches='tight')
plt.close()
plt.close(fig)
def preprocessing_image(img):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment