Commit 9d3929b1 authored by Jiri Borovec's avatar Jiri Borovec

docString & repair step

parent d09ea5ae
# Intro
# Data
Actually we are working with a synthetic dataset which was generated using
the included script - generate_dataset.py with some default parameters
all data are stored on standard paths within the university datagrid
# Experiments
All experiments are located in experiments.py
1. simple check whether the unary cost is computed correctly on 3 simple images
2. performing experiment on synthetic clear dataset
\ No newline at end of file
This diff is collapsed.
......@@ -8,80 +8,71 @@ import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
sys.path.append(os.path.abspath(os.path.join('..','..'))) # Add path to root
# import src.ownUtils.toolDataIO as tD
import generate_dataset as genAPD
import dictionary_learning as DL
import pattern_weights as encode
import similarity_metric as metric
import generate_dataset as gen_data
import dictionary_learning as dl
import pattern_disctionary as ptn_dict
import pattern_weights as ptn_weigth
import logging
logger = logging.getLogger(__name__)
def experiment_pipelineALPE(atlas, imgs, encoding):
initAtlas_org = DL.initialise_atlas_deform_original(atlas)
initAtlas_rnd = DL.initialise_atlas_random(atlas.shape, np.max(atlas))
initAtlas_msc = DL.initialise_atlas_mosaic(atlas.shape, np.max(atlas))
initEncode_rnd = DL.initialise_weights_random(len(imgs), np.max(atlas))
def experiment_pipeline_alpe(atlas, imgs, encoding):
initAtlas_org = ptn_dict.initialise_atlas_deform_original(atlas)
initAtlas_rnd = ptn_dict.initialise_atlas_random(atlas.shape, np.max(atlas))
initAtlas_msc = ptn_dict.initialise_atlas_mosaic(atlas.shape, np.max(atlas))
initEncode_rnd = ptn_weigth.initialise_weights_random(len(imgs), np.max(atlas))
pOut = os.path.join('..','..','output')
# DL.pipeline_estim_atlas_learning_ptn_weights(imgs, initAtlas=initAtlas_msc,
# maxIter=9, reInit=False, outDir=pOut, outPrefix='mosaic')
# _ = input('wait')
DL.pipeline_estim_atlas_learning_ptn_weights(imgs, init_atlas=initAtlas_rnd,
max_iter=9, reinit=True, out_dir=pOut, out_prefix='random')
# DL.pipeline_estim_atlas_learning_ptn_weights(imgs, initEncode=initEncode_rnd, outDir=pOut)
# dl.apd_pipe_atlas_learning_ptn_weights(imgs, initAtlas=initAtlas_msc,
# maxIter=9, reInit=False, outDir=pOut, outPrefix='mosaic')
dl.apd_pipe_atlas_learning_ptn_weights(imgs, init_atlas=initAtlas_rnd,
max_iter=9, out_dir=pOut, out_prefix='rnd')
return None
def getSimpleAtlas():
    """ create a small 20x20 atlas with three square patterns labelled 1..3
    on a zero background
    :return: np.array<20, 20>
    """
    atlas = np.zeros((20, 20))
    # (row-slice, col-slice) -> label for each square pattern
    squares = [((slice(2, 8), slice(2, 8)), 1),
               ((slice(12, 18), slice(12, 18)), 2),
               ((slice(2, 8), slice(12, 18)), 3)]
    for region, lb in squares:
        atlas[region] = lb
    return atlas
def getSampleImages(atlas):
    """ derive two sample images from a multi-label atlas:
    the first keeps only label 1, the second is a binary mask of labels >= 2
    :param atlas: np.array<w, h>
    :return: (np.array<w, h>, np.array<w, h>)
    """
    # keep values below 2 (background and label 1), zero out the rest
    img1 = np.where(atlas < 2, atlas, 0)
    # binary indicator of the higher labels (2 and above)
    img2 = (atlas >= 2).astype(atlas.dtype)
    return img1, img2
def test_simpleCase():
def test_simple_case():
# implement simple case just with 2 images and 2/3 classes in atlas
atlas = getSimpleAtlas()
atlas2 = atlas.copy()
atlas2[atlas2>2] = 0
img1, img2 = getSampleImages(atlas)
w1, w2 = [1,0,0], [0,1,0]
w3, w4 = [1,0,1], [0,1,1]
imgs = [img1, img2]
for j, ws in enumerate([[w1, w2],[w3, w4]]):
atlas = gen_data.get_simple_atlas()
# atlas2 = atlas.copy()
# atlas2[atlas2>2] = 0
imgs = gen_data.get_sample_images(atlas)
l_ws = [([1,0,0], [0,1,0], [0,0,1]),
([1,0,1], [0,1,1], [0,0,1])]
for j, ws in enumerate(l_ws):
plt.figure()
plt.title('w: {}'.format(repr(ws)))
gs = gridspec.GridSpec(2,4)
# plt.subplot(gs[0, 1]), plt.imshow(atlas, interpolation='nearest'), plt.title('atlas')
# plt.subplot(gs[0, 2]), plt.imshow(atlas2, interpolation='nearest'), plt.title('atlas')
plt.subplot(gs[0, 1]), plt.imshow(img1, cmap='gray', interpolation='nearest'), plt.title('w:{}'.format(ws[0]))
plt.subplot(gs[0, 2]), plt.imshow(img2, cmap='gray', interpolation='nearest'), plt.title('w:{}'.format(ws[1]))
uc = DL.compute_relative_penaly_images_weights(imgs, np.array(ws))
gs = gridspec.GridSpec(2, len(imgs) + 2)
plt.subplot(gs[0, 0]), plt.title('atlas')
cm = plt.cm.get_cmap('jet', len(np.unique(atlas)))
plt.imshow(atlas, cmap=cm, interpolation='nearest'), plt.colorbar()
for i, (img, w) in enumerate(zip(imgs, ws)):
plt.subplot(gs[0, i + 1]), plt.title('w:{}'.format(w))
plt.imshow(img, cmap='gray', interpolation='nearest')
uc = dl.compute_relative_penaly_images_weights(imgs, np.array(ws))
res = dl.estimate_atlas_graphcut_general(imgs, np.array(ws), 0.)
plt.subplot(gs[0, -1]), plt.title('result')
plt.imshow(res, cmap=cm, interpolation='nearest'), plt.colorbar()
uc = uc.reshape(atlas.shape+uc.shape[2:])
# logger.debug(ws)
for i in range(uc.shape[2]):
plt.subplot(gs[1, i]), plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.subplot(gs[1, i])
plt.imshow(uc[:,:,i], vmin=0, vmax=1, interpolation='nearest')
plt.title('cost lb #{}'.format(i)), plt.colorbar()
# logger.debug(uc)
return None
def main():
atlas = genAPD.dataset_createAtlas()
atlas = gen_data.dataset_create_atlas()
# plt.imshow(atlas)
imgs = genAPD.dataset_load_images()
imgs = gen_data.dataset_load_images()
# plt.imshow(imgs[0])
encoding = genAPD.dataset_load_encoding()
encoding = gen_data.dataset_load_weights()
# logger.info('encoding: {}'.format(encoding))
experiment_pipelineALPE(atlas, imgs, encoding)
experiment_pipeline_alpe(atlas, imgs, encoding)
return None
if __name__ == "__main__":
......@@ -91,7 +82,7 @@ if __name__ == "__main__":
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
# test_simpleCase()
# test_simple_case()
main()
......
This diff is collapsed.
import numpy as np
import generate_dataset as data
from skimage import morphology
import logging
logger = logging.getLogger(__name__)
def initialise_atlas_random(im_size, max_lb):
    """ initialise atlas with random labels
    :param im_size: (w, h) size of image
    :param max_lb: int, maximal label (labels span 0..max_lb)
    :return: np.array<w, h> of int labels
    """
    logger.debug('initialise atlas {} as random labeling'.format(im_size))
    nb_lbs = max_lb + 1
    # draw each pixel label uniformly from {0, ..., max_lb}
    im = np.random.randint(0, nb_lbs, im_size)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent
    return np.array(im, dtype=int)
def initialise_atlas_mosaic(im_size, max_lb):
    """ generate a grid texture and place one label into each rectangle,
    each row contains all labels (random permutation per row)
    :param im_size: (w, h) size of image
    :param max_lb: int, maximal label (labels span 0..max_lb)
    :return: np.array<w, h> of int labels
    """
    logger.debug('initialise atlas {} as grid labeling'.format(im_size))
    nb_lbs = max_lb + 1
    # np.ceil yields floats but array shapes must be ints; also the old
    # np.float / np.int aliases were removed in NumPy 1.24
    block_size = np.ceil(np.array(im_size) / float(nb_lbs)).astype(int)
    block = np.ones(block_size)
    logger.debug('block size is {}'.format(block.shape))
    rows = []
    for _ in range(nb_lbs):
        # each row of blocks carries all labels in a random order
        idx = np.random.permutation(range(nb_lbs))
        row = np.hstack([block.copy() * lb for lb in idx])
        rows.append(row)
    mosaic = np.vstack(rows)
    # FIX: the original format string had only one placeholder, silently
    # dropping the label list from the log message
    logger.debug('generated mosaic {} with labeling {}'.format(
        mosaic.shape, np.unique(mosaic).tolist()))
    # crop the block mosaic back to the requested image size
    im = mosaic[:im_size[0], :im_size[1]]
    return np.array(im, dtype=int)
def initialise_atlas_deform_original(atlas):
    """ take the original atlas and apply a geometrical (elastic) deformation
    to generate a new deformed atlas
    :param atlas: np.array<w, h>
    :return: np.array<w, h> of int labels
    """
    logger.debug('initialise atlas by deforming original one')
    res = data.image_deform_elastic(atlas)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent
    return np.array(res, dtype=int)
def reconstruct_samples(atlas, weights):
    """ create reconstructions of binary images according to the given atlas
    and per-image pattern weights
    :param atlas: np.array<w, h> input atlas
    :param weights: np.array<nb_imgs, nb_lbs> binary pattern activations
    :return: [np.array<w, h>] * nb_imgs
    """
    w_bin = np.array(weights)
    # prepend a zero column so label 0 (background) always maps to value 0
    weights_ext = np.append(np.zeros((w_bin.shape[0], 1)), w_bin, axis=1)
    atlas_idx = np.asarray(atlas)
    # look up each pixel's label in the image's extended weight vector
    imgs = [w[atlas_idx] for w in weights_ext]
    return imgs
def get_prototype_new_pattern(imgs, imgs_rc, diffs, atlas):
    """ estimate a new pattern that occurs in the input images and is not
    covered by any label in the actual atlas; collisions with the atlas
    are removed from the returned pattern
    :param imgs: [np.array<w, h>] list of input images
    :param imgs_rc: [np.array<w, h>] list of image reconstructions
    :param diffs: [int] list of differences among input and reconstruct images
    :param atlas: np.array<w, h> m-label atlas for further collision removing
    :return: np.array<w, h> binary single pattern
    """
    # work on the image whose reconstruction misses the most pixels
    id_max = np.argmax(diffs)
    im_diff = (imgs[id_max] - imgs_rc[id_max])
    # take just positive differences
    im_diff = im_diff > 0
    im_diff = morphology.closing(im_diff, morphology.disk(1))
    # find largest connected component
    ptn = data.extract_image_largest_element(im_diff)
    # FIX: remove atlas collisions from the extracted component itself;
    # previously `ptn` was overwritten from the raw `im_diff`, discarding
    # the largest-component extraction entirely
    ptn = np.logical_and(ptn > 0, atlas == 0)
    return ptn
def insert_new_pattern(imgs, w_bin, atlas, lb):
    """ with respect to the atlas empty spots, insert a new pattern under
    label `lb`; note the atlas is modified in place and also returned
    :param imgs: [np.array<w, h>] list of input images
    :param w_bin: np.array<nb_imgs, nb_lbs> binary weights
    :param atlas: np.array<w, h>
    :param lb: int, label to assign to the new pattern
    :return: np.array<w, h> updated atlas
    """
    imgs_rc = reconstruct_samples(atlas, w_bin)
    # count just the positive differences (pixels missing in reconstruction)
    diffs = [np.sum((im - im_rc) > 0) for im, im_rc in zip(imgs, imgs_rc)]
    im_ptn = get_prototype_new_pattern(imgs, imgs_rc, diffs, atlas)
    # boolean-mask indexing directly; comparing `== True` was redundant
    atlas[im_ptn] = lb
    return atlas
def reinit_atlas_likely_patterns(imgs, w_bin, atlas):
    """ find all labels unused by every image and try to re-initialise each
    of them with a newly estimated pattern
    :param imgs: [np.array<w, h>] list of input images
    :param w_bin: np.array<nb_imgs, nb_lbs> binary weights
    :param atlas: np.array<w, h>
    :return: np.array<w, h> updated atlas
    """
    # a zero column sum means no image activates that pattern
    sums = np.sum(w_bin, axis=0)
    logger.debug('IN > sum over weights: {}'.format(sums))
    empty_lbs = [lb for lb, total in enumerate(sums) if total == 0]
    for lb in empty_lbs:
        atlas = insert_new_pattern(imgs, w_bin, atlas, lb)
    return atlas
if __name__ == "__main__":
    # smoke entry point: enable verbose logging when the module is run directly
    logging.basicConfig(level=logging.DEBUG)
    logger.info('DONE')
\ No newline at end of file
import numpy as np
import generate_dataset as data
from skimage import morphology
import logging
logger = logging.getLogger(__name__)
def initialise_weights_random(nb_imgs, nb_lbs, ratio_sel=0.2):
    """ initialise binary weights by random selection
    :param nb_imgs: int, number of all images
    :param nb_lbs: int, number of all available labels
    :param ratio_sel: float<0, 1> defining how many weights should be set on,
        1 means all and 0 means none
    :return: np.array<nb_imgs, nb_lbs> of values {0, 1}
    """
    logger.debug('initialise weights for {} images and {} labels '
                 'as random selection'.format(nb_imgs, nb_lbs))
    prob = np.random.random((nb_imgs, nb_lbs))
    # threshold uniform samples so roughly ratio_sel of the weights are active
    weights = (prob <= ratio_sel).astype(float)
    return weights
def convert_weights_binary2indexes(weights):
""" convert binary matrix oof weights to list of indexes o activated ptns
:param weights: np.array<nb_imgs, nb_lbs>
:return: [[int, ...]] * nb_imgs
"""
logger.debug('convert binary weights {} '
'to list of indexes with True'.format(weights.shape))
# if type(weights)==np.ndarray: weights = weights.tolist()
......@@ -19,20 +42,38 @@ def convert_weights_binary2indexes(weights):
return w_idx
def weighs_image_atlas_overlap_major(img, atlas):
def weights_image_atlas_overlap_major(img, atlas):
"""
:param img: np.array<w, h>
:param atlas: np.array<w, h>
:return: [int] * nb_lbs of values {0, 1}
"""
# logger.debug('weights input image according given atlas')
weights = weighs_image_atlas_overlap_threshold(img, atlas, 0.5)
weights = weights_image_atlas_overlap_threshold(img, atlas, 0.5)
return weights
def weighs_image_atlas_overlap_partial(img, atlas):
def weights_image_atlas_overlap_partial(img, atlas):
"""
:param img: np.array<w, h>
:param atlas: np.array<w, h>
:return: [int] * nb_lbs of values {0, 1}
"""
# logger.debug('weights input image according given atlas')
lbs = np.unique(atlas).tolist()
weights = weighs_image_atlas_overlap_threshold(img, atlas, (1. / len(lbs)))
weights = weights_image_atlas_overlap_threshold(img, atlas, (1. / len(lbs)))
return weights
def weighs_image_atlas_overlap_threshold(img, atlas, thr=0.5):
def weights_image_atlas_overlap_threshold(img, atlas, thr=0.5):
""" estimate what patterns are activated with given atlas and input image
compute overlap matrix and eval nr of overlapping and non pixels and threshold
:param img: np.array<w, h>
:param atlas: np.array<w, h>
:param thr: float, represent the ration between overlapping and non pixels
:return: [int] * nb_lbs of values {0, 1}
"""
# logger.debug('weights input image according given atlas')
# simple weight
lbs = np.unique(atlas).tolist()
......@@ -48,16 +89,6 @@ def weighs_image_atlas_overlap_threshold(img, atlas, thr=0.5):
return weight
def fill_empty_patterns(w_bin):
    """ activate patterns (columns) not used by any image so every label is
    covered at least once; note `w_bin` is modified in place and returned
    :param w_bin: np.array<nb_imgs, nb_lbs> binary weights
    :return: np.array<nb_imgs, nb_lbs> updated weights
    """
    sums = np.sum(w_bin, axis=0)
    logger.debug('IN > sum over weights: {}'.format(sums))
    # switch every unused pattern on for all images in one vectorized step
    w_bin[:, sums == 0] = 1
    logger.debug('OUT < sum over weights: {}'.format(np.sum(w_bin, axis=0)))
    return w_bin
if __name__ == "__main__":
    # smoke entry point: enable verbose logging when the module is run directly
    logging.basicConfig(level=logging.DEBUG)
......
......@@ -5,6 +5,13 @@ logger = logging.getLogger(__name__)
def compare_atlas_rnd_pairs(a1, a2):
""" compare two atlases as taking random pixels pairs from both
and evaluate that the are labeled equally of differently
:param a1: np.array<w, h>
:param a2: np.array<w, h>
:return: float with 0 means no difference
"""
logger.debug('comparing two atlases '
'of shapes {} <-> {}'.format(a1.shape, a2.shape))
assert np.array_equal(a1.shape, a2.shape)
......@@ -38,6 +45,13 @@ def compare_atlas_rnd_pairs(a1, a2):
def compare_atlas_adjusted_rand(a1, a2):
""" using adjusted rand and transform original values from (-1, 1) to (0, 1)
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_rand_score.html
:param a1: np.array<w, h>
:param a2: np.array<w, h>
:return: float with 0 means no difference
"""
assert np.array_equal(a1.shape, a2.shape)
ars = metrics.adjusted_rand_score(a1.ravel(), a2.ravel())
res = 0.5 - (ars / 2.)
......@@ -45,6 +59,11 @@ def compare_atlas_adjusted_rand(a1, a2):
def overlap_matrix_mlabel_segm(seg1, seg2):
"""
:param seg1: np.array<w, h>
:param seg2: np.array<w, h>
:return: np.array<w, h>
"""
logger.debug('computing overlap of two segm '
'of shapes {} <-> {}'.format(seg1.shape, seg2.shape))
assert np.array_equal(seg1.shape, seg2.shape)
......@@ -64,16 +83,29 @@ def overlap_matrix_mlabel_segm(seg1, seg2):
def compare_matrices(m1, m2):
    """ compute the mean absolute element-wise difference of two matrices
    :param m1: np.array<w, h>
    :param m2: np.array<w, h>
    :return: float, 0 means no difference
    """
    assert np.array_equal(m1.shape, m2.shape)
    diff = np.sum(abs(m1 - m2))
    # np.product is deprecated (removed in NumPy 2.0); np.prod is canonical
    return diff / float(np.prod(m1.shape))
def compare_weights(c1, c2):
    """ compare two weight matrices as the mean absolute element-wise
    difference (thin wrapper over compare_matrices)
    :param c1: np.array<nb_imgs, nb_lbs>
    :param c2: np.array<nb_imgs, nb_lbs>
    :return: float, 0 means no difference
    """
    return compare_matrices(c1, c2)
def test_atlases():
"""
:return:
"""
logger.info('testing METRIC')
a = np.random.randint(0,4,(5,5))
a2 = a.copy()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment