From 215b51fa1c4a3adf4d07d5e63c5d3fe021a129bb Mon Sep 17 00:00:00 2001
From: "Lucia D. Hradecka" <lucia.d.hradecka@gmail.com>
Date: Fri, 12 Apr 2024 11:09:59 +0200
Subject: [PATCH] clean repo

---
 100IterationsTest.txt        |  88 ----------------
 HowToChangeDocumentation.txt |  12 ---
 HowToUploadPackage.txt       |  48 ---------
 demo.py                      | 103 ------------------
 devel.py                     | 206 -------------------------------------
 tst_volumentations_speed.py  |  52 ---------
 6 files changed, 509 deletions(-)
 delete mode 100644 100IterationsTest.txt
 delete mode 100644 HowToChangeDocumentation.txt
 delete mode 100644 HowToUploadPackage.txt
 delete mode 100644 demo.py
 delete mode 100644 devel.py
 delete mode 100644 tst_volumentations_speed.py

diff --git a/100IterationsTest.txt b/100IterationsTest.txt
deleted file mode 100644
index 7aca0ad..0000000
--- a/100IterationsTest.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-Transform: AffineTransform,  Size: (1, 64, 64, 64), FirstRun: 0.044, Average: 0.040, Iterations: 100, maximum: 0.046 
-Transform: AffineTransform,  Size: (1, 128, 128, 128), FirstRun: 0.343, Average: 0.328, Iterations: 100, maximum: 0.346 
-Transform: AffineTransform,  Size: (1, 256, 256, 256), FirstRun: 3.088, Average: 3.035, Iterations: 100, maximum: 3.110 
-Transform: AffineTransform,  Size: (1, 512, 512, 64), FirstRun: 2.637, Average: 2.633, Iterations: 100, maximum: 2.737 
-Transform: AffineTransform,  Size: (1, 64, 64, 64), FirstRun: 0.050, Average: 0.042, Iterations: 100, maximum: 0.050 
-Transform: AffineTransform,  Size: (1, 128, 128, 128), FirstRun: 0.346, Average: 0.339, Iterations: 100, maximum: 0.352 
-Transform: AffineTransform,  Size: (1, 256, 256, 256), FirstRun: 3.607, Average: 3.613, Iterations: 100, maximum: 3.689 
-Transform: AffineTransform,  Size: (1, 512, 512, 64), FirstRun: 2.950, Average: 2.756, Iterations: 100, maximum: 3.321 
-Transform: Flip,  Size: (1, 64, 64, 64), FirstRun: 0.007, Average: 0.001, Iterations: 100, maximum: 0.002 
-Transform: Flip,  Size: (1, 128, 128, 128), FirstRun: 0.005, Average: 0.006, Iterations: 100, maximum: 0.007 
-Transform: Flip,  Size: (1, 256, 256, 256), FirstRun: 0.050, Average: 0.059, Iterations: 100, maximum: 0.066 
-Transform: Flip,  Size: (1, 512, 512, 64), FirstRun: 0.058, Average: 0.060, Iterations: 100, maximum: 0.068 
-Transform: GaussianBlur,  Size: (1, 64, 64, 64), FirstRun: 0.009, Average: 0.001, Iterations: 100, maximum: 0.002 
-Transform: GaussianBlur,  Size: (1, 128, 128, 128), FirstRun: 0.005, Average: 0.006, Iterations: 100, maximum: 0.007 
-Transform: GaussianBlur,  Size: (1, 256, 256, 256), FirstRun: 0.045, Average: 0.053, Iterations: 100, maximum: 0.060 
-Transform: GaussianBlur,  Size: (1, 512, 512, 64), FirstRun: 0.052, Average: 0.054, Iterations: 100, maximum: 0.060 
-Transform: GaussianNoise,  Size: (1, 64, 64, 64), FirstRun: 0.016, Average: 0.008, Iterations: 100, maximum: 0.009 
-Transform: GaussianNoise,  Size: (1, 128, 128, 128), FirstRun: 0.059, Average: 0.061, Iterations: 100, maximum: 0.062 
-Transform: GaussianNoise,  Size: (1, 256, 256, 256), FirstRun: 0.479, Average: 0.497, Iterations: 100, maximum: 0.510 
-Transform: GaussianNoise,  Size: (1, 512, 512, 64), FirstRun: 0.495, Average: 0.498, Iterations: 100, maximum: 0.529 
-Transform: HistogramEqualization,  Size: (1, 64, 64, 64), FirstRun: 0.068, Average: 0.016, Iterations: 100, maximum: 0.017 
-Transform: HistogramEqualization,  Size: (1, 128, 128, 128), FirstRun: 0.124, Average: 0.125, Iterations: 100, maximum: 0.127 
-Transform: HistogramEqualization,  Size: (1, 256, 256, 256), FirstRun: 1.005, Average: 1.021, Iterations: 100, maximum: 1.089 
-Transform: HistogramEqualization,  Size: (1, 512, 512, 64), FirstRun: 1.008, Average: 1.021, Iterations: 100, maximum: 1.073 
-Transform: Normalize,  Size: (1, 64, 64, 64), FirstRun: 0.005, Average: 0.002, Iterations: 100, maximum: 0.004 
-Transform: Normalize,  Size: (1, 128, 128, 128), FirstRun: 0.022, Average: 0.023, Iterations: 100, maximum: 0.023 
-Transform: Normalize,  Size: (1, 256, 256, 256), FirstRun: 0.178, Average: 0.199, Iterations: 100, maximum: 0.223 
-Transform: Normalize,  Size: (1, 512, 512, 64), FirstRun: 0.186, Average: 0.198, Iterations: 100, maximum: 0.216 
-Transform: NormalizeMeanStd,  Size: (1, 64, 64, 64), FirstRun: 0.007, Average: 0.001, Iterations: 100, maximum: 0.001 
-Transform: NormalizeMeanStd,  Size: (1, 128, 128, 128), FirstRun: 0.005, Average: 0.006, Iterations: 100, maximum: 0.007 
-Transform: NormalizeMeanStd,  Size: (1, 256, 256, 256), FirstRun: 0.042, Average: 0.052, Iterations: 100, maximum: 0.063 
-Transform: NormalizeMeanStd,  Size: (1, 512, 512, 64), FirstRun: 0.050, Average: 0.052, Iterations: 100, maximum: 0.077 
-Transform: RandomBrightnessContrast,  Size: (1, 64, 64, 64), FirstRun: 0.007, Average: 0.001, Iterations: 100, maximum: 0.001 
-Transform: RandomBrightnessContrast,  Size: (1, 128, 128, 128), FirstRun: 0.005, Average: 0.004, Iterations: 100, maximum: 0.007 
-Transform: RandomBrightnessContrast,  Size: (1, 256, 256, 256), FirstRun: 0.024, Average: 0.033, Iterations: 100, maximum: 0.064 
-Transform: RandomBrightnessContrast,  Size: (1, 512, 512, 64), FirstRun: 0.031, Average: 0.035, Iterations: 100, maximum: 0.062 
-Transform: RandomFlip,  Size: (1, 64, 64, 64), FirstRun: 0.007, Average: 0.001, Iterations: 100, maximum: 0.002 
-Transform: RandomFlip,  Size: (1, 128, 128, 128), FirstRun: 0.006, Average: 0.007, Iterations: 100, maximum: 0.009 
-Transform: RandomFlip,  Size: (1, 256, 256, 256), FirstRun: 0.048, Average: 0.067, Iterations: 100, maximum: 0.087 
-Transform: RandomFlip,  Size: (1, 512, 512, 64), FirstRun: 0.067, Average: 0.067, Iterations: 100, maximum: 0.090 
-Transform: RandomGamma,  Size: (1, 64, 64, 64), FirstRun: 0.011, Average: 0.003, Iterations: 100, maximum: 0.004 
-Transform: RandomGamma,  Size: (1, 128, 128, 128), FirstRun: 0.026, Average: 0.027, Iterations: 100, maximum: 0.030 
-Transform: RandomGamma,  Size: (1, 256, 256, 256), FirstRun: 0.222, Average: 0.229, Iterations: 100, maximum: 0.247 
-Transform: RandomGamma,  Size: (1, 512, 512, 64), FirstRun: 0.222, Average: 0.229, Iterations: 100, maximum: 0.247 
-Transform: RandomGaussianBlur,  Size: (1, 64, 64, 64), FirstRun: 0.014, Average: 0.006, Iterations: 100, maximum: 0.007 
-Transform: RandomGaussianBlur,  Size: (1, 128, 128, 128), FirstRun: 0.217, Average: 0.221, Iterations: 100, maximum: 0.235 
-Transform: RandomGaussianBlur,  Size: (1, 256, 256, 256), FirstRun: 0.930, Average: 0.733, Iterations: 100, maximum: 0.971 
-Transform: RandomGaussianBlur,  Size: (1, 512, 512, 64), FirstRun: 0.873, Average: 0.692, Iterations: 100, maximum: 0.940 
-Transform: RandomRotate90,  Size: (1, 64, 64, 64), FirstRun: 0.009, Average: 0.002, Iterations: 100, maximum: 0.003 
-Transform: RandomRotate90,  Size: (1, 128, 128, 128), FirstRun: 0.007, Average: 0.081, Iterations: 100, maximum: 0.153 
-Transform: RandomRotate90,  Size: (1, 256, 256, 256), FirstRun: 0.059, Average: 0.236, Iterations: 100, maximum: 0.656 
-Transform: RandomRotate90,  Size: (1, 512, 512, 64), FirstRun: 0.405, Average: 0.189, Iterations: 100, maximum: 0.452 
-Transform: RandomScale,  Size: (1, 64, 64, 64), FirstRun: 0.026, Average: 0.017, Iterations: 100, maximum: 0.042 
-Transform: RandomScale,  Size: (1, 128, 128, 128), FirstRun: 0.259, Average: 0.141, Iterations: 100, maximum: 0.276 
-Transform: RandomScale,  Size: (1, 256, 256, 256), FirstRun: 2.231, Average: 1.046, Iterations: 100, maximum: 2.180 
-Transform: RandomScale,  Size: (1, 512, 512, 64), FirstRun: 1.857, Average: 1.024, Iterations: 100, maximum: 2.118 
-Transform: Scale,  Size: (1, 64, 64, 64), FirstRun: 0.054, Average: 0.034, Iterations: 100, maximum: 0.047 
-Transform: Scale,  Size: (1, 128, 128, 128), FirstRun: 0.266, Average: 0.271, Iterations: 100, maximum: 0.276 
-Transform: Scale,  Size: (1, 256, 256, 256), FirstRun: 2.177, Average: 2.200, Iterations: 100, maximum: 2.266 
-Transform: Scale,  Size: (1, 512, 512, 64), FirstRun: 2.199, Average: 2.196, Iterations: 100, maximum: 2.240 
-Transform: Scale,  Size: (1, 64, 64, 64), FirstRun: 0.031, Average: 0.005, Iterations: 100, maximum: 0.006 
-Transform: Scale,  Size: (1, 128, 128, 128), FirstRun: 0.038, Average: 0.039, Iterations: 100, maximum: 0.044 
-Transform: Scale,  Size: (1, 256, 256, 256), FirstRun: 0.313, Average: 0.322, Iterations: 100, maximum: 0.370 
-Transform: Scale,  Size: (1, 512, 512, 64), FirstRun: 0.322, Average: 0.325, Iterations: 100, maximum: 0.363 
-Transform: CenterCrop,  Size: (1, 64, 64, 64), FirstRun: 0.068, Average: 0.003, Iterations: 100, maximum: 0.005 
-Transform: CenterCrop,  Size: (1, 128, 128, 128), FirstRun: 0.029, Average: 0.031, Iterations: 100, maximum: 0.037 
-Transform: CenterCrop,  Size: (1, 256, 256, 256), FirstRun: 0.240, Average: 0.220, Iterations: 100, maximum: 0.251 
-Transform: CenterCrop,  Size: (1, 512, 512, 64), FirstRun: 0.263, Average: 0.255, Iterations: 100, maximum: 0.286 
-Transform: CenterCrop,  Size: (1, 64, 64, 64), FirstRun: 0.001, Average: 0.001, Iterations: 100, maximum: 0.001 
-Transform: CenterCrop,  Size: (1, 128, 128, 128), FirstRun: 0.004, Average: 0.005, Iterations: 100, maximum: 0.009 
-Transform: CenterCrop,  Size: (1, 256, 256, 256), FirstRun: 0.032, Average: 0.042, Iterations: 100, maximum: 0.059 
-Transform: CenterCrop,  Size: (1, 512, 512, 64), FirstRun: 0.037, Average: 0.038, Iterations: 100, maximum: 0.045 
-Transform: RandomCrop,  Size: (1, 64, 64, 64), FirstRun: 0.010, Average: 0.004, Iterations: 100, maximum: 0.022 
-Transform: RandomCrop,  Size: (1, 128, 128, 128), FirstRun: 0.029, Average: 0.031, Iterations: 100, maximum: 0.033 
-Transform: RandomCrop,  Size: (1, 256, 256, 256), FirstRun: 0.191, Average: 0.223, Iterations: 100, maximum: 0.292 
-Transform: RandomCrop,  Size: (1, 512, 512, 64), FirstRun: 0.259, Average: 0.243, Iterations: 100, maximum: 0.256 
-Transform: RandomCrop,  Size: (1, 64, 64, 64), FirstRun: 0.002, Average: 0.001, Iterations: 100, maximum: 0.002 
-Transform: RandomCrop,  Size: (1, 128, 128, 128), FirstRun: 0.004, Average: 0.005, Iterations: 100, maximum: 0.006 
-Transform: RandomCrop,  Size: (1, 256, 256, 256), FirstRun: 0.032, Average: 0.040, Iterations: 100, maximum: 0.045 
-Transform: RandomCrop,  Size: (1, 512, 512, 64), FirstRun: 0.036, Average: 0.038, Iterations: 100, maximum: 0.039 
-Transform: Resize,  Size: (1, 64, 64, 64), FirstRun: 0.066, Average: 0.065, Iterations: 100, maximum: 0.068 
-Transform: Resize,  Size: (1, 128, 128, 128), FirstRun: 0.505, Average: 0.511, Iterations: 100, maximum: 0.517 
-Transform: Resize,  Size: (1, 256, 256, 256), FirstRun: 4.249, Average: 4.369, Iterations: 100, maximum: 5.118 
-Transform: Resize,  Size: (1, 512, 512, 64), FirstRun: 4.287, Average: 4.259, Iterations: 100, maximum: 4.360 
-Transform: Resize,  Size: (1, 64, 64, 64), FirstRun: 0.036, Average: 0.009, Iterations: 100, maximum: 0.011 
-Transform: Resize,  Size: (1, 128, 128, 128), FirstRun: 0.068, Average: 0.070, Iterations: 100, maximum: 0.073 
-Transform: Resize,  Size: (1, 256, 256, 256), FirstRun: 0.554, Average: 0.572, Iterations: 100, maximum: 0.608 
-Transform: Resize,  Size: (1, 512, 512, 64), FirstRun: 0.560, Average: 0.568, Iterations: 100, maximum: 0.608 
diff --git a/HowToChangeDocumentation.txt b/HowToChangeDocumentation.txt
deleted file mode 100644
index 81a71c1..0000000
--- a/HowToChangeDocumentation.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Prerequisites:
-    - You need to know the basics of HTML.
-
-Step 1:
-    - Open the file docs/_build/html/index.html in your web browser to see what the documentation currently looks like.
-
-Step 2:
-    - Open the same file, docs/_build/html/index.html, with your preferred text editor.
-
-Step 3:
-    - Make the desired changes to the documentation (index.html), ensuring that you adhere to valid HTML syntax.
-    - You can also modify the file volumentations_biomedicine.augmentations.html if you wish to change the documentation of the functions.
\ No newline at end of file
diff --git a/HowToUploadPackage.txt b/HowToUploadPackage.txt
deleted file mode 100644
index 2e55a58..0000000
--- a/HowToUploadPackage.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Links/resources:
-  - an easy tutorial: https://www.youtube.com/watch?v=tEFkHEKypLI&t=563s
-  - PyPI: uploading packages: https://packaging.python.org/en/latest/tutorials/packaging-projects/
-  - PyPI FAQ (see "Administration of projects"): https://pypi.org/help/#administration
-
-Prerequisites:
-  - Have an account on PyPI.
-  - Choose a package name that is not already taken.
-  - Have the code of the package directly on your local machine.
-  - Install Twine with: pip3 install twine
-
-Step 1 (Library Structure):
-  - Have an __init__.py in your package folder.
-  - Have a setup.py at the same level as your package.
-    Example of how your folder should look:
-      .
-      ├── bio_volumentations (Package Folder)
-      │   ├── __init__.py
-      │   ├── 100IterationsTest.txt
-      │   ├── ...
-      └── setup.py
-
-  - Ensure that the version in your `setup.py` is not one that has already been uploaded in the past.
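-
-    A minimal sketch of what `setup.py` might contain (the metadata values
-    below are illustrative placeholders, not this package's actual settings):
-
-      from setuptools import setup, find_packages
-
-      setup(
-          name="bio-volumentations",   # must be unique on PyPI
-          version="0.0.1",             # increase this for every new upload
-          packages=find_packages(),
-      )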
-
-Step 2 (Upload):
-  - When you are in the folder containing `setup.py` and your package, use the following command in your terminal to generate the package files: python3 setup.py sdist bdist_wheel
-    Example of how your folder should look after the command:
-    .
-    ├── bio_volumentations
-    ├── bio_volumentations.egg-info
-    ├── build
-    ├── dist
-    └── setup.py
-
-  - To upload the package, use this command in your terminal (in the same folder as the previous command): twine upload dist/*
-  - Then, enter your API token to upload the package (you can find this in your PyPI account settings).
-  - Your package is now uploaded to PyPI.
diff --git a/demo.py b/demo.py
deleted file mode 100644
index 7e16578..0000000
--- a/demo.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# ============================================================================================= #
-#  Author:       Samuel Šuľan, Lucia Hradecká, Filip Lux                                        #
-#  Copyright:    Lucia Hradecká     : lucia.d.hradecka@gmail.com                                #
-#                Filip Lux          : lux.filip@gmail.com                                       #
-#                                                                                               #
-#  MIT License.                                                                                 #
-#                                                                                               #
-#  Permission is hereby granted, free of charge, to any person obtaining a copy                 #
-#  of this software and associated documentation files (the "Software"), to deal                #
-#  in the Software without restriction, including without limitation the rights                 #
-#  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell                    #
-#  copies of the Software, and to permit persons to whom the Software is                        #
-#  furnished to do so, subject to the following conditions:                                     #
-#                                                                                               #
-#  The above copyright notice and this permission notice shall be included in all               #
-#  copies or substantial portions of the Software.                                              #
-#                                                                                               #
-#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR                   #
-#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,                     #
-#  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE                  #
-#  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER                       #
-#  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,                #
-#  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE                #
-#  SOFTWARE.                                                                                    #
-# ============================================================================================= #
-
-from bio_volumentations.core.composition import *
-from bio_volumentations.augmentations import *
-
-from devel import *
-
-
-def get_augmentation():
-    return Compose([
-        CenterCrop(shape=(200, 200, 15), p=1),
-        AffineTransform(angle_limit=[(20, 20), (20, 20), (20, 20)], scaling_coef=[1, 1, 3.22832576], p=1),
-        Pad(pad_size=[10, 3, 0], p=1)
-    ], p=1.0)
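-
-# Note: every transform above has p=1 and Compose has p=1.0, so the whole
-# crop -> affine -> pad pipeline should be applied on every call.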
- 
-
-
-def get_augmentation_more():
-    return Compose([
-        CenterCrop(shape=(200, 200, 20), p=1),
-        AffineTransform(angle_limit=[(20, 20), (20, 20), (20, 20)], scaling_coef=[1, 1, 3.22832576], p=1),
-        Pad(pad_size=[10, 3, 0], p=1)],
-        targets=[['image', 'image1'], ['mask'], ['float_mask']], p=1.0)
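-
-# Note: the 'targets' argument above groups the keyword names by target type:
-# 'image' and 'image1' are transformed as images, 'mask' as an integer mask,
-# and 'float_mask' as a float mask.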
-
-
-
-if __name__ == '__main__':
-
-    # Constants
-    data_samples = ["ekarev2023-crop-small-anisotropic.tif", "ekarev2023-crop-small-isotropic.tif", "brain.nii", "Fluo-C3DH-H157_01_t000.tif",
-                    "ekarev2023-crop2-3-zscaled.tif", "ekarev2023-crop2-2.tif"]
-    number_of_sample = 1
-    path_to_image = "../Data_samples/" + data_samples[number_of_sample]
-    path_to_image1 = "../Data_samples/" + data_samples[2]
-
-    path_to_augumented_images = "D:/CBIA/demo/"
-    name_of_file = path_to_image.split("/")[-1]
-    multipleChannels = True
-    normalize_with_regards_to_max = False
-
-
-
-    img, maximum, minimum, affine_array = image_preparation(path_to_image, multipleChannels)
-    img1, maximum1, minimum1, affine_array1 = image_preparation(path_to_image1, False)
-    mask = img[0].copy()
-    # Or use random data instead:
-    # img = np.random.rand(1, 128, 256, 256)
-    # mask = np.random.randint(0, 2, size=(128, 256, 256), dtype=np.uint8)
-
-    aug = get_augmentation()
-
-    # Organize the data into a dictionary keyed by target name
-    data = {'image': img, 'mask': mask}
-    # data = {'image': img, 'mask': mask, 'image1': img1}
-
-    # Apply the transformations
-    aug_data = aug(**data)
-
-    # Retrieve the transformed data
-    # img, mask, img1 = aug_data['image'], aug_data['mask'], aug_data['image1']
-    img, mask = aug_data['image'], aug_data['mask']
-
-    # Re-add the channel axis, just for saving purposes
-    mask = mask[np.newaxis, :]
-
-    # Save the images
-    image_save(path_to_augumented_images + "Image" + ".tif", img, minimum, maximum, affine_array, normalize_with_regards_to_max)
-    image_save(path_to_augumented_images + "mask" + ".tif", mask, minimum, maximum, affine_array, normalize_with_regards_to_max)
-    # image_save(path_to_augumented_images + "Image1" + ".nii", img1, minimum1, maximum1, affine_array1, normalize_with_regards_to_max)
-
-
-
diff --git a/devel.py b/devel.py
deleted file mode 100644
index f20801a..0000000
--- a/devel.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# coding: utf-8
-
-# ============================================================================================= #
-#  Author:       Samuel Šuľan, Lucia Hradecká, Filip Lux                                        #
-#  Copyright:    Lucia Hradecká     : lucia.d.hradecka@gmail.com                                #
-#                Filip Lux          : lux.filip@gmail.com                                       #
-#                                                                                               #
-#  MIT License.                                                                                 #
-#                                                                                               #
-#  Permission is hereby granted, free of charge, to any person obtaining a copy                 #
-#  of this software and associated documentation files (the "Software"), to deal                #
-#  in the Software without restriction, including without limitation the rights                 #
-#  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell                    #
-#  copies of the Software, and to permit persons to whom the Software is                        #
-#  furnished to do so, subject to the following conditions:                                     #
-#                                                                                               #
-#  The above copyright notice and this permission notice shall be included in all               #
-#  copies or substantial portions of the Software.                                              #
-#                                                                                               #
-#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR                   #
-#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,                     #
-#  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE                  #
-#  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER                       #
-#  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,                #
-#  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE                #
-#  SOFTWARE.                                                                                    #
-# ============================================================================================= #
-
-from bio_volumentations.core.composition import *
-from bio_volumentations.augmentations import *
-import numpy as np
-import tifffile
-import nibabel as nib
-
-
-if __name__ == '__main__':
-    # Paths to the data files
-    debug = False
-    data_samples = ["ekarev2023-crop-small-anisotropic.tif", "ekarev2023-crop-small-isotropic.tif", "brain.nii", "Fluo-C3DH-H157_01_t000.tif",
-                    "ekarev2023-crop2-3-zscaled.tif", "ekarev2023-crop2-2.tif"]
-    number_of_sample = 0
-
-    if debug:
-        path_to_image = "./Data_samples/" + data_samples[number_of_sample]
-        path_to_augumented_images = "./data_augumented/"
-    else:
-        path_to_image = "../Data_samples/" + data_samples[number_of_sample]
-        path_to_augumented_images = "../data_augumented/"
-    path_to_augumented_images = "D:/CBIA/augumented/"  # overrides the paths above
-
-# These are functions for loading and saving images.
-# They are not perfect; feel free to change them according to your needs.
-
-def image_to_numpy(path):
-    file_format = path.split(".")[-1]
-    if file_format == "tif":
-        numpy_image = tifffile.imread(path)
-        affine_array = None
-    elif file_format == "nii":
-        img = nib.load(path)
-        numpy_image = img.get_fdata()
-        affine_array = img.affine
-    else:
-        raise ValueError("Unsupported file format: " + file_format)
-    return numpy_image.astype(np.float32), affine_array
-
-def numpy_to_file(path, image, affine_array):
-    file_format = path.split(".")[-1]
-    if file_format == "tif":
-        if len(image.shape) == 5:
-            tifffile.imsave(path, image, metadata={'axes': 'TZCYX'}, imagej=True)
-        elif len(image.shape) == 4:
-            tifffile.imsave(path, image, metadata={'axes': 'ZCYX'}, imagej=True)
-        else:
-            tifffile.imsave(path, image, imagej=True)
-    elif file_format == "nii":
-        array_img = nib.Nifti1Image(image, affine_array)
-        nib.save(array_img, path)
-
-
-def numpy_remove_negative(image):
-    negative_indices = image < 0
-    image[negative_indices] = 0
-    return image
-
-
-def numpy_normalization(image):
-    '''
-    Shift the image so that its minimum is non-negative, then divide by the
-    original maximum to map the intensities approximately into [0, 1].
-    '''
-    minimum = image.min()
-    maximum = image.max()
-    if minimum < 0:
-        image = image - minimum
-    image = image / maximum
-    return image, maximum, minimum
- 
-
-def numpy_reverse_normalization(image, minimum, maximum, normalize_with_regards_to_max):
-    '''
-    Map the intensities back towards the original range (the input can be negative).
-    '''
-    print("image.min(): " + str(image.min()))
-    if image.min() < 0:
-        image = image - image.min()
-    print("maximum: " + str(maximum) + "  image.max(): " + str(image.max()))
-    if image.max() == 0:
-        return image.astype(np.ushort)
-    elif normalize_with_regards_to_max:
-        image = image * (maximum / image.max())
-    else:
-        image = image * maximum
-    return image.astype(np.ushort)
-
-def convention_format(image, multiple_channels=False):
-    # Reverse the axis order and move the channel axis to the front:
-    # e.g. a (Z, C, Y, X) image becomes (C, X, Y, Z), and a single-channel
-    # (Z, Y, X) volume becomes (1, X, Y, Z).
-    shape = list(range(len(image.shape)))
-    shape.reverse()
-
-    if multiple_channels:
-        shape.insert(0, shape.pop(2))
-        return np.transpose(image, shape)
-    else:
-        return np.expand_dims(np.transpose(image, shape), axis=0)
-
-
-def move_channels_back(image):
-    # Invert convention_format: reverse the axes back and move the channel
-    # axis to its original position (dropping it for single-channel data).
-    if image.shape[0] > 1:
-        shape = list(range(len(image.shape)))
-        shape.reverse()
-        shape.insert(len(image.shape) - 3, shape.pop())
-        return np.transpose(image, shape)
-    else:
-        shape = [i - 1 for i in range(len(image.shape))]
-        shape.reverse()
-        shape.pop()
-        return np.transpose(image.squeeze(0), shape)
-
-
-def image_preparation(path, multipleChannels=False):
-    image, affine_array = image_to_numpy(path)
-    image = convention_format(image, multipleChannels)
-    image, maximum, minimum = numpy_normalization(image)
-    return image, maximum, minimum, affine_array
-
-
-def image_save(path, image, minimum, maximum, affine_array, normalize_with_regards_to_max):
-    # The affine_array may only be needed for .nii files.
-    image = numpy_reverse_normalization(image, minimum, maximum, normalize_with_regards_to_max)
-    image = move_channels_back(image)
-    numpy_to_file(path, image, affine_array)
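-
-# A minimal round trip using the helpers above (a sketch; the paths follow the
-# constants defined at the top of this file, and "out.tif" is illustrative):
-#   img, maximum, minimum, affine = image_preparation(path_to_image)
-#   img = get_augmentation()(image=img)['image']
-#   image_save(path_to_augumented_images + "out.tif", img, minimum, maximum, affine, False)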
-
-
-
-def get_augmentation():
-    return Compose([
-        # RandomFlip(axes_to_choose=[1, 2, 3], p=1),
-        # RandomCrop((200, 250, 30), p=1),
-        # RandomScale2(scale_limit=(0.9, 1.1), p=1),
-        # AffineTransform(angle_limit=[(25, 25), (25, 25), (25, 25)], border_mode="constant", scaling_coef=[1, 1, 3.22832576], scale_back=False, p=1),
-        # CenterCrop(shape=(200, 200, 15), ignore_index=200, p=1),
-        # Resize(shape=(300, 100, 20), ignore_index=203, p=1),
-        # Scale(scale_factor=0.5, ignore_index=203),
-        Pad(pad_size=(8, 9), p=1)
-        # NormalizeMeanStd(mean=[0.5, 2], std=0.5, p=1),
-        # Normalize(mean=2, std=0, p=1)
-    ], p=1.0)
-
-
-def get_augmentationMore():
-    return Compose([
-        RandomGamma(gamma_limit=(0.8, 1.2), p=0.8),
-        RandomRotate90(axes=[3, 3, 3], p=1),
-        GaussianBlur(sigma=1.2, p=0.8),
-    ], targets=[['image', 'image1'], ['mask'], ['float_mask']], p=1.0)
-
-
-if __name__ == '__main__':
-
-    name_of_file = path_to_image.split("/")[-1]
-    multipleChannels = True
-    normalize_with_regards_to_max = False
-    path_to_image1 = "../Data_samples/" + data_samples[3]
-    print(path_to_image)
-
-
-    img, maximum, minimum, affine_array = image_preparation(path_to_image, multipleChannels)
-    mask = img[0].copy()
-    aug = get_augmentation()
-    data = {'image': img, 'mask': mask}  # , 'image1': img1
-    aug_data = aug(**data)
-    img = aug_data['image']
-    mask = aug_data['mask']
-
-    # image_save(path_to_augumented_images + "pad" + name_of_file, img, minimum, maximum, affine_array, normalize_with_regards_to_max)
\ No newline at end of file
diff --git a/tst_volumentations_speed.py b/tst_volumentations_speed.py
deleted file mode 100644
index 07601d6..0000000
--- a/tst_volumentations_speed.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding: utf-8
-__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
-
-from bio_volumentations.core.composition import Compose
-from bio_volumentations.augmentations.transforms import RandomAffineTransform
-
-import time
-import numpy as np
-
-
-def tst_volumentations_speed():
-    total_volumes_to_check = 100
-    sizes_list = [
-        (1, 64, 64, 64),
-        # (1, 128, 128, 128),
-        # (1, 256, 256, 256),
-        # (1, 512, 512, 64),
-    ]
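-    # These volume sizes mirror the ones reported in 100IterationsTest.txt;
-    # uncomment the larger sizes to benchmark them as well.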
-
-    for size in sizes_list:
-
-        full_list_to_check = [
-            RandomAffineTransform(angle_limit=[22.5, 22.5, 22.5], p=1),
-        ]
-
-        for f in full_list_to_check:
-            name = f.__class__.__name__
-            aug1 = Compose([
-                f,
-            ], p=1.0)
-
-            data = []
-            for i in range(total_volumes_to_check):
-                data.append(np.random.uniform(low=0.0, high=255, size=size))
-            start_time = time.time()
-            for i, cube in enumerate(data):
-                try:
-                    # Only the timing matters here; the result is discarded.
-                    cube1 = aug1(image=cube)['image']
-                except Exception as e:
-                    print('Augmentation error: {}'.format(str(e)))
-                    continue
-
-            delta = time.time() - start_time
-            print('Size: {} Aug: {} Time: {:.2f} sec Per sample: {:.4f} sec'.format(size, name, delta, delta / len(data)))
-            # print(f.__dict__)
-
-
-if __name__ == '__main__':
-    tst_volumentations_speed()
-- 
GitLab