Commit 69143da2 authored by Kristýna Janků

Created par_crop for Fluo-N2DH-SIM+ dataset

parent b1ca921b
@@ -30,7 +30,7 @@ for video in video_list:
         snippets[video][obj_id] = dict()
         for frame in frames:
-            # Load segmented image, gold truth if available, silver truth otherwise
+            # Load segmented image
             seg = glob.glob(os.path.join(data_path, video + '_GT', 'SEG', '*' + frame + '*'))
             seg_image = cv2.imread(seg[0], cv2.IMREAD_ANYDEPTH)
@@ -54,8 +54,10 @@ for video in video_list:
 # Divide into training and validation data
 train = {k: v for (k, v) in snippets.items() if '02' in k}
 val = {k: v for (k, v) in snippets.items() if '01' in k}
+all = {k: v for (k, v) in snippets.items()}
 # Save to json files
 json.dump(train, open('train.json', 'w'), indent=4, sort_keys=True)
 json.dump(val, open('val.json', 'w'), indent=4, sort_keys=True)
+json.dump(all, open('all.json', 'w'), indent=4, sort_keys=True)
 print('All videos done!')
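The newly added script below (par_crop for the Fluo-N2DH-SIM+ dataset, per the commit message) reads these annotations back from all.json. Judging by how crop_video indexes the file, each entry maps video -> object id -> frame -> bounding box [x1, y1, x2, y2]; a minimal sketch of the assumed layout, with made-up values:

{
    "01": {
        "1": {
            "000": [52.0, 118.0, 96.0, 161.0]
        }
    }
}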
# Kristyna Janku: Base code was taken from par_crop.py for the VID dataset; the functions "crop_video" and "main",
# as well as the paths to files, were rewritten by me to fit the Fluo-N2DH-SIM+ dataset
from os.path import join, isdir
from os import mkdir, makedirs
import cv2
import numpy as np
import json
from concurrent import futures
import sys
import time
data_path = './data'
# Print iterations progress (thanks StackOverflow)
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    formatStr = "{0:." + str(decimals) + "f}"
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round(barLength * iteration / float(total)))
    bar = '█' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        sys.stdout.write('\x1b[2K\r')
    sys.stdout.flush()
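# Example of how printProgress is typically driven (illustrative only, not part of the original script;
# do_work is a hypothetical placeholder):
#   total = 100
#   for i in range(1, total + 1):
#       do_work(i)
#       printProgress(i, total, prefix='Progress:', suffix='Complete', barLength=40)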
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
    # Affine-warp the region given by bbox = [x1, y1, x2, y2] onto an out_sz x out_sz patch;
    # pixels that fall outside the image are filled with the constant padding value
    a = (out_sz-1) / (bbox[2]-bbox[0])
    b = (out_sz-1) / (bbox[3]-bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    mapping = np.array([[a, 0, c],
                        [0, b, d]]).astype(np.float64)
    crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
    return crop
def pos_s_2_bbox(pos, s):
    # Convert a center position and square side length s into an [x1, y1, x2, y2] box
    return [pos[0]-s/2, pos[1]-s/2, pos[0]+s/2, pos[1]+s/2]
def crop_like_SiamFC(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    # Cut out an exemplar patch (z) and a larger search patch (x) centred on the target,
    # adding context around the bounding box in the SiamFC style
    target_pos = [(bbox[2]+bbox[0])/2., (bbox[3]+bbox[1])/2.]
    target_size = [bbox[2]-bbox[0], bbox[3]-bbox[1]]
    wc_z = target_size[1] + context_amount * sum(target_size)
    hc_z = target_size[0] + context_amount * sum(target_size)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = exemplar_size / s_z
    d_search = (instanc_size - exemplar_size) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    z = crop_hwc(image, pos_s_2_bbox(target_pos, s_z), exemplar_size, padding)
    x = crop_hwc(image, pos_s_2_bbox(target_pos, s_x), instanc_size, padding)
    return z, x
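# Worked example of the scaling above (illustrative only, not part of the original script):
# for a 40 x 40 px box, context_amount=0.5 and the defaults exemplar_size=127, instanc_size=255:
#   wc_z = hc_z = 40 + 0.5 * 80 = 80, so s_z = 80 (exemplar crop side, in image pixels)
#   scale_z = 127 / 80, d_search = (255 - 127) / 2 = 64, pad = 64 / scale_z ≈ 40.3
#   s_x = 80 + 2 * pad ≈ 160.6 (search crop side, in image pixels)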
def crop_video(video, crop_path, instance_size):
    video_crop_base_path = join(crop_path, video)
    if not isdir(video_crop_base_path): makedirs(video_crop_base_path)
    with open('all.json', 'r') as annot_file:
        annotations = json.load(annot_file)
    for object_id, files in annotations[video].items():
        for filename, bbox in files.items():
            filepath = join(data_path, video, 't' + filename + '.tif')
            # Load 16-bit TIFF image and convert it to 8-bit
            im = cv2.convertScaleAbs(cv2.imread(filepath, cv2.IMREAD_ANYDEPTH))
            # Pad the crops with the mean image intensity
            avg_chans = np.mean(im, axis=(0, 1))
            z, x = crop_like_SiamFC(im, bbox, instanc_size=instance_size, padding=avg_chans)
            # Save the crops as <frame>.<object>.z.jpg / .x.jpg, e.g. 000012.01.z.jpg
            cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(filename), int(object_id))), z)
            cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(int(filename), int(object_id))), x)
def main(instance_size=511, num_threads=24):
    crop_path = './crop{:d}'.format(instance_size)
    if not isdir(crop_path): mkdir(crop_path)
    videos = ['01', '02']
    n_videos = len(videos)
    # Crop the videos in parallel worker processes
    with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
        fs = [executor.submit(crop_video, video, crop_path, instance_size) for video in videos]
        for i, f in enumerate(futures.as_completed(fs)):
            # Write progress so that it can be seen
            printProgress(i + 1, n_videos, suffix='Done ', barLength=40)
if __name__ == '__main__':
    since = time.time()
    main(int(sys.argv[1]), int(sys.argv[2]))
    time_elapsed = time.time() - since
    print('Total complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
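The script takes the instance (search) size and the number of worker processes as positional arguments, so a typical invocation would be something like python par_crop.py 511 24 (the file name is assumed from the commit message); the exemplar and search crops are then written under ./crop511/01 and ./crop511/02.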