Commit 2dac32d9 authored by Jirka

pep8

parent 58d26112
@@ -8,36 +8,38 @@ import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
sys.path.append(os.path.abspath(os.path.join('..','..'))) # Add path to root
# import src.ownUtils.toolDataIO as tD
import generateDataset as genAPD
import dictionaryLearning as DL
import patternWeights as encode
import similarityMetric as metric
import generate_dataset as genAPD
import dictionary_learning as DL
import pattern_weights as encode
import similarity_metric as metric
import logging
logger = logging.getLogger(__name__)
def experiment_pipelineALPE (atlas, imgs, encoding) :
initAtlas_org = DL.initialiseAtlas_deformOriginal(atlas)
initAtlas_rnd = DL.initialiseAtlas_random(atlas.shape, np.max(atlas))
initAtlas_msc = DL.initialiseAtlas_mosaic(atlas.shape, np.max(atlas))
initEncode_rnd = DL.initialiseWeights_random(len(imgs), np.max(atlas))
def experiment_pipelineALPE(atlas, imgs, encoding):
initAtlas_org = DL.initialise_atlas_deform_original(atlas)
initAtlas_rnd = DL.initialise_atlas_random(atlas.shape, np.max(atlas))
initAtlas_msc = DL.initialise_atlas_mosaic(atlas.shape, np.max(atlas))
initEncode_rnd = DL.initialise_weights_random(len(imgs), np.max(atlas))
pOut = os.path.join('..','..','output')
DL.pipeline_estimAtlasLearningPatternWeights(imgs, initAtlas=initAtlas_org, maxIter=9, reInit=False, outDir=pOut, outPrefix='d1')
# DL.pipeline_estim_atlas_learning_ptn_weights(imgs, initAtlas=initAtlas_msc,
# maxIter=9, reInit=False, outDir=pOut, outPrefix='mosaic')
# _ = input('wait')
DL.pipeline_estimAtlasLearningPatternWeights(imgs, initAtlas=initAtlas_org, maxIter=9, reInit=True, outDir=pOut, outPrefix='d2')
# DL.pipeline_estimAtlasLearningPatternWeights(imgs, initEncode=initEncode_rnd, outDir=pOut)
DL.pipeline_estim_atlas_learning_ptn_weights(imgs, init_atlas=initAtlas_rnd,
max_iter=9, reinit=True, out_dir=pOut, out_prefix='random')
# DL.pipeline_estim_atlas_learning_ptn_weights(imgs, initEncode=initEncode_rnd, outDir=pOut)
return None
def getSimpleAtlas () :
def getSimpleAtlas():
atlas = np.zeros((20,20))
atlas[2:8,2:8] = 1
atlas[12:18,12:18] = 2
atlas[2:8,12:18] = 3
return atlas
def getSampleImages (atlas) :
def getSampleImages(atlas):
img1 = atlas.copy()
img1[img1>=2] = 0
img2 = atlas.copy()
@@ -45,7 +47,7 @@ def getSampleImages (atlas) :
img2[img2>0] = 1
return img1, img2
def test_simpleCase () :
def test_simpleCase():
# implement simple case just with 2 images and 2/3 classes in atlas
atlas = getSimpleAtlas()
atlas2 = atlas.copy()
@@ -54,7 +56,7 @@ def test_simpleCase () :
w1, w2 = [1,0,0], [0,1,0]
w3, w4 = [1,0,1], [0,1,1]
imgs = [img1, img2]
for j, ws in enumerate([[w1, w2],[w3, w4]]) :
for j, ws in enumerate([[w1, w2],[w3, w4]]):
plt.figure()
plt.title('w: {}'.format(repr(ws)))
gs = gridspec.GridSpec(2,4)
@@ -62,7 +64,7 @@ def test_simpleCase () :
# plt.subplot(gs[0, 2]), plt.imshow(atlas2, interpolation='nearest'), plt.title('atlas')
plt.subplot(gs[0, 1]), plt.imshow(img1, cmap='gray', interpolation='nearest'), plt.title('w:{}'.format(ws[0]))
plt.subplot(gs[0, 2]), plt.imshow(img2, cmap='gray', interpolation='nearest'), plt.title('w:{}'.format(ws[1]))
uc = DL.computeRelativePenalyFromImagesWeights(imgs, np.array(ws))
uc = DL.compute_relative_penaly_images_weights(imgs, np.array(ws))
uc = uc.reshape(atlas.shape+uc.shape[2:])
# logger.debug(ws)
for i in range(uc.shape[2]):
@@ -71,13 +73,13 @@ def test_simpleCase () :
# logger.debug(uc)
return None
def main () :
def main():
atlas = genAPD.dataset_createAtlas()
# plt.imshow(atlas)
imgs = genAPD.dataset_loadImages()
imgs = genAPD.dataset_load_images()
# plt.imshow(imgs[0])
encoding = genAPD.dataset_loadEncoding()
# logger.info('encoding : {}'.format(encoding))
encoding = genAPD.dataset_load_encoding()
# logger.info('encoding: {}'.format(encoding))
experiment_pipelineALPE(atlas, imgs, encoding)
return None
@@ -89,9 +91,9 @@ if __name__ == "__main__":
# test_encoding(atlas, imgs, encoding)
# test_atlasLearning(atlas, imgs, encoding)
test_simpleCase()
# test_simpleCase()
# main()
main()
logger.info('DONE')
plt.show()
\ No newline at end of file
__author__ = 'Jiri Borovec'
import os, sys
import numpy as np
import logging
logger = logging.getLogger(__name__)
def convertWeightsBinary2indexes (weights) :
logger.debug('convert binary weights {} to list of indexes with True'.format(weights.shape))
# if type(weights)==np.ndarray : weights = weights.tolist()
weighIdx = [None] * weights.shape[0]
for i in range(weights.shape[0]) :
# find positions equal 1
# vec = [j for j in range(weights.shape[1]) if weights[i,j]==1]
vec = np.where(weights[i, :] == 1)[0]
weighIdx[i] = vec +1
# idxs = np.where(weights == 1)
# for i in range(weights.shape[0]) :
# weighIdx[i] = idxs[1][idxs[0]==i] +1
return weighIdx
def weighsImageByAtlas_overlapMajor (img, atlas) :
# logger.debug('weight the input image according to the given atlas')
weights = weighsImageByAtlas_overlapTreshold(img, atlas, 0.5)
return weights
def weighsImageByAtlas_overlapPartial (img, atlas) :
# logger.debug('weight the input image according to the given atlas')
lbs = np.unique(atlas).tolist()
weights = weighsImageByAtlas_overlapTreshold(img, atlas, (1. / len(lbs)))
return weights
def weighsImageByAtlas_overlapTreshold (img, atlas, thr=0.5) :
# logger.debug('weight the input image according to the given atlas')
# simple weight
lbs = np.unique(atlas).tolist()
# logger.debug('weights image by atlas with labels: {}'.format(lbs))
if 0 in lbs : lbs.remove (0)
weight = [0] * np.max(lbs)
for l in lbs :
equal = np.sum( img[atlas==l] )
total = np.sum( atlas==l )
score = equal / float(total)
if score >= thr :
weight[l-1] = 1
return weight
def fillEmptyPatterns (binWeighs) :
sums = np.sum(binWeighs, axis=0)
logger.debug('IN > sum over weights: {}'.format(sums))
for i, v in enumerate(sums) :
if v == 0 :
binWeighs[:, i] = 1
logger.debug('OUT < sum over weights: {}'.format(np.sum(binWeighs, axis=0)))
return binWeighs
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import logging
logger = logging.getLogger(__name__)
def convert_weights_binary2indexes(weights):
logger.debug('convert binary weights {} '
'to list of indexes with True'.format(weights.shape))
# if type(weights)==np.ndarray: weights = weights.tolist()
w_idx = [None] * weights.shape[0]
for i in range(weights.shape[0]):
# find positions equal 1
# vec = [j for j in range(weights.shape[1]) if weights[i,j]==1]
vec = np.where(weights[i,:] == 1)[0]
w_idx[i] = vec +1
# idxs = np.where(weights == 1)
# for i in range(weights.shape[0]):
# w_idx[i] = idxs[1][idxs[0]==i] +1
return w_idx
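A minimal usage sketch (not part of this commit; w_demo is illustrative only): each row of a binary weight matrix is mapped to the 1-based indexes of its active patterns.
import numpy as np
w_demo = np.array([[1, 0, 1],
                   [0, 1, 0]])
# row 0 activates patterns 1 and 3, row 1 only pattern 2
print(convert_weights_binary2indexes(w_demo))  # -> [array([1, 3]), array([2])]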
def weighs_image_atlas_overlap_major(img, atlas):
# logger.debug('weight the input image according to the given atlas')
weights = weighs_image_atlas_overlap_threshold(img, atlas, 0.5)
return weights
def weighs_image_atlas_overlap_partial(img, atlas):
# logger.debug('weight the input image according to the given atlas')
lbs = np.unique(atlas).tolist()
weights = weighs_image_atlas_overlap_threshold(img, atlas, (1. / len(lbs)))
return weights
def weighs_image_atlas_overlap_threshold(img, atlas, thr=0.5):
# logger.debug('weight the input image according to the given atlas')
# simple weight
lbs = np.unique(atlas).tolist()
# logger.debug('weights image by atlas with labels: {}'.format(lbs))
if 0 in lbs: lbs.remove(0)
weight = [0] * np.max(lbs)
for l in lbs:
equal = np.sum(img[atlas==l])
total = np.sum(atlas==l)
score = equal / float(total)
if score >= thr:
weight[l-1] = 1
return weight
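A small illustrative example (not part of this commit; atlas_demo and img_demo are made up): label 1 of a toy atlas is fully covered by the image while label 2 is only a quarter covered, so with the 0.5 threshold only the first pattern receives weight 1.
import numpy as np
atlas_demo = np.zeros((4, 4), dtype=int)
atlas_demo[:2, :2] = 1           # label 1 occupies 4 pixels
atlas_demo[2:, 2:] = 2           # label 2 occupies 4 pixels
img_demo = np.zeros((4, 4), dtype=int)
img_demo[:2, :2] = 1             # covers label 1 completely (score 1.0)
img_demo[2, 2] = 1               # covers label 2 by 1/4 (score 0.25)
print(weighs_image_atlas_overlap_threshold(img_demo, atlas_demo, 0.5))  # -> [1, 0]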
def fill_empty_patterns(w_bin):
sums = np.sum(w_bin, axis=0)
logger.debug('IN > sum over weights: {}'.format(sums))
for i, v in enumerate(sums):
if v == 0:
w_bin[:, i] = 1
logger.debug('OUT < sum over weights: {}'.format(np.sum(w_bin, axis=0)))
return w_bin
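A tiny illustration (not part of this commit): a pattern used by no image (an all-zero column) is switched on for every image so it does not drop out of the estimation.
import numpy as np
w_demo = np.array([[1, 0],
                   [1, 0]])
print(fill_empty_patterns(w_demo))  # the empty second column is filled with ones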
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logger.info('DONE')
\ No newline at end of file
__author__ = 'Jiri Borovec'
import os, sys
import numpy as np
from sklearn import metrics
import logging
logger = logging.getLogger(__name__)
def compareAtlas_randomPositions (A1, A2) :
logger.debug('comparing two atlases of shapes {} <-> {}'.format(A1.shape, A2.shape))
assert np.array_equal(A1.shape, A2.shape)
# assert A1.shape[0]==A2.shape[0] and A1.shape[1]==A2.shape[1]
logger.debug('unique labels are {} and {}'.format(np.unique(A1).tolist(), np.unique(A2).tolist()))
X, Y = np.meshgrid(range(A1.shape[0]), range(A1.shape[1]))
vecX, vecY = X.flatten(), Y.flatten()
vecX_perm = np.random.permutation(vecX)
vecY_perm = np.random.permutation(vecY)
diffs = 0
for x1, y1, x2, y2 in zip(vecX, vecY, vecX_perm, vecY_perm) :
b1 = A1[x1,y1] == A1[x2,y2]
b2 = A2[x1,y1] == A2[x2,y2]
if not b1 == b2 : # T&F or F&T
# logger.debug('coords [{},{}], [{},{}] gives vals: {}, {}'.format(x1, y1, x2, y2, b1, b2))
# logger.debug('diff: {}?={} and {}?={}'.format(A1[x1,y1], A1[x2,y2], A2[x1,y1], A2[x2,y2]))
diffs += 1
res = diffs / float(len(vecX))
return res
# Rand index, we have already talked about that.
# Now it is only a question of how to compute this index efficiently. Which, if you
# think about it, is not too hard: you build a contingency table = histogram, i.e. denote by n_ij how many pixels at the same
# coordinates belong to class 'i' in the first image and to class 'j' in the second. The number of agreements (two elements are in the same
# class in the first image and in the same class in the second, or in different classes in both the first and the second) can then be expressed
# from the histogram - see formula (1) in the paper http://link.springer.com/article/10.1007%2FBF01908075
#
# There is also the Adjusted Rand Index, see the same paper or https://en.wikipedia.org/wiki/Rand_index
# For our purposes it probably does not matter, since we only want to compare the methods against each other.
def compareAtlas_adjustedRand (A1, A2) :
assert np.array_equal(A1.shape, A2.shape)
ars = metrics.adjusted_rand_score(A1.ravel(), A2.ravel())
res = 0.5 - (ars / 2.)
return res
def overlapMatrixMultilabelSegm (seg1, seg2) :
logger.debug('computing overlap of two segm of shapes {} <-> {}'.format(seg1.shape, seg2.shape))
assert np.array_equal(seg1.shape, seg2.shape)
uLb1 = np.unique(seg1)
uLb2 = np.unique(seg2)
uLb1 = dict(zip(uLb1,range(len(uLb1))))
uLb2 = dict(zip(uLb2,range(len(uLb2))))
logger.debug('unique labels:\n {}\n {}'.format(uLb1, uLb2))
res = np.zeros([len(uLb1), len(uLb2)])
for i in range(seg1.shape[0]) :
for j in range(seg1.shape[1]) :
u1, u2 = uLb1[seg1[i, j]], uLb2[seg2[i, j]]
res[u1, u2] += 1
res[u2, u1] += 1
# logger.debug(res)
return res
def compareMatrices (M1, M2) :
assert np.array_equal(M1.shape, M2.shape)
diff = np.sum(abs(M1 - M2))
return diff / float(np.product(M1.shape))
def compareWeights (C1, C2) :
return compareMatrices(C1, C2)
def test_Atlases () :
logger.info('testing METRIC')
A = np.random.randint(0,4, (5,5))
A2 = A.copy()
A2[A2==0] = -1
B = np.random.randint(0,4, (5,5))
logger.debug('compareAtlas_randomPositions, A <-> A: {}'.format(compareAtlas_randomPositions(A,A2)))
logger.debug('compareAtlas_randomPositions, A <-> B: {}'.format(compareAtlas_randomPositions(A,B)))
logger.debug('compareAtlas_adjustedRand, A <-> A: {}'.format(compareAtlas_adjustedRand(A,A2)))
logger.debug('compareAtlas_adjustedRand, A <-> B: {}'.format(compareAtlas_adjustedRand(A,B)))
return None
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
test_Atlases()
import numpy as np
from sklearn import metrics
import logging
logger = logging.getLogger(__name__)
def compare_atlas_rnd_pairs(a1, a2):
logger.debug('comparing two atlases '
'of shapes {} <-> {}'.format(a1.shape, a2.shape))
assert np.array_equal(a1.shape, a2.shape)
# assert A1.shape[0]==A2.shape[0] and A1.shape[1]==A2.shape[1]
logger.debug('unique labels are {} and {}'.format(np.unique(a1).tolist(), np.unique(a2).tolist()))
X, Y = np.meshgrid(range(a1.shape[0]), range(a1.shape[1]))
vec_x, vec_y = X.flatten(), Y.flatten()
vec_x_perm = np.random.permutation(vec_x)
vec_y_perm = np.random.permutation(vec_y)
diffs = 0
for x1, y1, x2, y2 in zip(vec_x, vec_y, vec_x_perm, vec_y_perm):
b1 = a1[x1, y1] == a1[x2, y2]
b2 = a2[x1, y1] == a2[x2, y2]
if not b1 == b2: # T&F or F&T
# logger.debug('coords [{},{}], [{},{}] gives vals: {}, {}'.format(x1, y1, x2, y2, b1, b2))
# logger.debug('diff: {}?={} and {}?={}'.format(A1[x1,y1], A1[x2,y2], A2[x1,y1], A2[x2,y2]))
diffs += 1
res = diffs / float(len(vec_x))
return res
# Rand index, we have already talked about that.
# Now it is only a question of how to compute this index efficiently. Which, if you
# think about it, is not too hard: you build a contingency table = histogram, i.e. denote by n_ij how many pixels at the same
# coordinates belong to class 'i' in the first image and to class 'j' in the second. The number of agreements (two elements are in the same
# class in the first image and in the same class in the second, or in different classes in both the first and the second) can then be expressed
# from the histogram - see formula (1) in the paper http://link.springer.com/article/10.1007%2FBF01908075
#
# There is also the Adjusted Rand Index, see the same paper or https://en.wikipedia.org/wiki/Rand_index
# For our purposes it probably does not matter, since we only want to compare the methods against each other.
def compare_atlas_adjusted_rand(a1, a2):
assert np.array_equal(a1.shape, a2.shape)
ars = metrics.adjusted_rand_score(a1.ravel(), a2.ravel())
res = 0.5 - (ars / 2.)
return res
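A minimal sketch (not part of this commit) of the contingency-table computation described in the comment above; rand_index_from_contingency is a hypothetical helper that counts agreeing pixel pairs from the table n_ij, along the lines of formula (1) in the linked paper.
import numpy as np

def rand_index_from_contingency(a1, a2):
    # n_ij = number of pixels labelled i in a1 and j in a2
    lbs1, lbs2 = np.unique(a1), np.unique(a2)
    cont = np.zeros((len(lbs1), len(lbs2)))
    for i, l1 in enumerate(lbs1):
        for j, l2 in enumerate(lbs2):
            cont[i, j] = np.sum((a1 == l1) & (a2 == l2))

    def comb2(x):
        # number of unordered pairs that can be formed from x elements
        return x * (x - 1) / 2.

    pairs_all = comb2(a1.size)
    pairs_both = np.sum(comb2(cont))              # pairs joined in both atlases
    pairs_1 = np.sum(comb2(cont.sum(axis=1)))     # pairs joined in a1
    pairs_2 = np.sum(comb2(cont.sum(axis=0)))     # pairs joined in a2
    # agreements = pairs joined in both + pairs separated in both
    agree = pairs_both + (pairs_all - pairs_1 - pairs_2 + pairs_both)
    return agree / pairs_all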
def overlap_matrix_mlabel_segm(seg1, seg2):
logger.debug('computing overlap of two segm '
'of shapes {} <-> {}'.format(seg1.shape, seg2.shape))
assert np.array_equal(seg1.shape, seg2.shape)
u_lb1 = np.unique(seg1)
u_lb2 = np.unique(seg2)
u_lb1 = dict(zip(u_lb1,range(len(u_lb1))))
u_lb2 = dict(zip(u_lb2,range(len(u_lb2))))
logger.debug('unique labels:\n {}\n {}'.format(u_lb1, u_lb2))
res = np.zeros([len(u_lb1), len(u_lb2)])
for i in range(seg1.shape[0]):
for j in range(seg1.shape[1]):
u1, u2 = u_lb1[seg1[i, j]], u_lb2[seg2[i, j]]
res[u1, u2] += 1
res[u2, u1] += 1
# logger.debug(res)
return res
def compare_matrices(m1, m2):
assert np.array_equal(m1.shape, m2.shape)
diff = np.sum(abs(m1 - m2))
return diff / float(np.product(m1.shape))
def compare_weights(c1, c2):
return compare_matrices(c1, c2)
def test_atlases():
logger.info('testing METRIC')
a = np.random.randint(0,4,(5,5))
a2 = a.copy()
a2[a2==0] = -1
b = np.random.randint(0,4,(5,5))
logger.debug('compare_atlas_rnd_pairs, a <-> a: {}'.format(compare_atlas_rnd_pairs(a, a2)))
logger.debug('compare_atlas_rnd_pairs, a <-> b: {}'.format(compare_atlas_rnd_pairs(a, b)))
logger.debug('compare_atlas_adjusted_rand, a <-> a: {}'.format(compare_atlas_adjusted_rand(a, a2)))
logger.debug('compare_atlas_adjusted_rand, a <-> b: {}'.format(compare_atlas_adjusted_rand(a, b)))
return None
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
test_atlases()
logger.info('DONE')
\ No newline at end of file