Skip to content

Commit

Permalink
Benchmark improvements (#413)
Browse files Browse the repository at this point in the history
* Fix benchmarking issues

* Use copy=True when converting Pillow Image to np.array

* Update benchmarking results
  • Loading branch information
creafz authored and ternaus committed Oct 10, 2019
1 parent d288207 commit ff1a17f
Show file tree
Hide file tree
Showing 2 changed files with 57 additions and 39 deletions.
32 changes: 16 additions & 16 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -205,22 +205,22 @@ The table shows how many images per second can be processed on a single core, hi

| | albumentations <br><small>0.4.0</small> | imgaug <br><small>0.2.9</small> | torchvision (Pillow backend) <br><small>0.4.0</small> | torchvision (Pillow-SIMD backend) <br><small>0.4.0</small> | keras <br><small>2.3.1</small> | augmentor <br><small>0.2.6</small> | solt <br><small>0.1.8</small> |
|------------------------|:--------------:|:------:|:--------------------:|:-------------------------:|:-----:|:---------:|:-----:|
| HorizontalFlip | 1393 | 880 | **3799** | 3670 | 543 | 3744 | 190 |
| VerticalFlip | **3908** | 2120 | 2856 | 3294 | 3901 | 2752 | 3178 |
| Rotate | **296** | 251 | 82 | 104 | 10 | 37 | 74 |
| ShiftScaleRotate | **640** | 428 | 78 | 109 | 13 | - | - |
| Brightness | **1723** | 1010 | 303 | 373 | 132 | 292 | 1535 |
| Contrast | **1753** | 1126 | 215 | 261 | - | 209 | 1535 |
| BrightnessContrast | **1698** | 566 | 125 | 152 | - | 118 | 794 |
| ShiftRGB | **1698** | 1022 | - | - | 507 | - | - |
| ShiftHSV | **341** | 339 | 34 | 45 | - | - | 106 |
| Gamma | **1809** | - | 725 | 775 | - | - | 571 |
| Grayscale | 3564 | 225 | 662 | 800 | - | 2042 |**4723**|
| RandomCrop64 | **296061** | 2354 | 25418 | 49619 | - | 19515 | 38583 |
| PadToSize512 | **2690** | - | 396 | 433 | - | - | 2381 |
| Resize512 | 573 | 413 | 247 | **1009** | - | 232 | 552 |
| RandomSizedCrop_64_512 | **1865** | 782 | 421 | 1714 | - | 409 | 1751 |
| Equalize | **535** | 463 | - | - | - | 298 | - |
| HorizontalFlip | 961 | 754 | **1246** | 1251 | 669 | 1154 | 619 |
| VerticalFlip | **3941** | 2069 | 1105 | 1150 | 3884 | 1054 | 3540 |
| Rotate | **375** | 300 | 83 | 120 | 18 | 36 | 91 |
| ShiftScaleRotate | **664** | 454 | 75 | 116 | 23 | - | - |
| Brightness | **1806** | 1067 | 260 | 320 | 133 | 252 | 1694 |
| Contrast | **1701** | 1123 | 190 | 241 | - | 184 | 1699 |
| BrightnessContrast | **1749** | 577 | 114 | 143 | - | 112 | 880 |
| ShiftRGB | **1813** | 984 | - | - | 509 | - | - |
| ShiftHSV | **349** | 340 | 35 | 45 | - | - | 106 |
| Gamma | **1926** | - | 549 | 580 | - | - | 701 |
| Grayscale | **3688** | 307 | 487 | 574 | - | 872 | 2927 |
| RandomCrop64 | **602010** | 2908 | 22398 | 33850 | - | 14267 | 38450 |
| PadToSize512 | **2749** | - | 350 | 378 | - | - | 2370 |
| Resize512 | 576 | 427 | 211 | **648** | - | 213 | 568 |
| RandomSizedCrop_64_512 | **2223** | 715 | 334 | 1023 | - | 339 | 1949 |
| Equalize | **466** | 460 | - | - | - | 256 | - |

Python and library versions: Python 3.7.3, numpy 1.17.2, pillow 6.2.0, pillow-simd 6.0.0.post0, opencv-python 4.1.1.26, scikit-image 0.15.0, scipy 1.3.0.

Expand Down
64 changes: 41 additions & 23 deletions benchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,20 +98,27 @@ def imgaug(self, img):
return self.imgaug_transform.augment_image(img)

def augmentor(self, img):
return self.augmentor_op.perform_operation([img])
img = self.augmentor_op.perform_operation([img])[0]
return np.array(img, np.uint8, copy=True)

def solt(self, img):
    """Run this benchmark's solt stream on *img* and return the result.

    The image is wrapped in a solt DataContainer with format string "I"
    (a single image entry); after applying ``self.solt_stream`` the first
    element of the container's data tuple is returned (a numpy array,
    presumably — confirm against solt's DataContainer docs).
    """
    dc = sld.DataContainer(img, "I")
    dc = self.solt_stream(dc)
    return dc.data[0]

def torchvision(self, img):
    """Apply the torchvision transform and return a uint8 numpy array.

    ``copy=True`` forces a fresh buffer when converting the Pillow image to
    a numpy array, so every library pays the same image-to-array conversion
    cost in the benchmark (see commit message: "Use copy=True when
    converting Pillow Image to np.array").
    """
    img = self.torchvision_transform(img)
    return np.array(img, np.uint8, copy=True)

def is_supported_by(self, library):
    """Return True if this benchmark defines an implementation for *library*.

    Most libraries are detected by the presence of a same-named method on
    the instance; a few map to specially named attributes (e.g. "imgaug"
    is implemented via ``imgaug_transform``).
    """
    special_attrs = {
        "imgaug": ("imgaug_transform",),
        "augmentor": ("augmentor_op", "augmentor_pipeline"),
        "solt": ("solt_stream",),
        "torchvision": ("torchvision_transform",),
    }
    candidates = special_attrs.get(library, (library,))
    return any(hasattr(self, attr) for attr in candidates)

Expand All @@ -133,7 +140,7 @@ def albumentations(self, img):
else:
return albumentations.hflip(img)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Horizontal flip via torchvision's functional API (operates on a PIL Image — presumably; confirm against the benchmark driver)."""
    return torchvision.hflip(img)

def keras(self, img):
Expand All @@ -152,7 +159,7 @@ def __init__(self):
def albumentations(self, img):
    """Vertical flip via albumentations' functional interface (the `albumentations` name is the module alias imported at file top, outside this view)."""
    return albumentations.vflip(img)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Vertical flip via torchvision's functional API."""
    return torchvision.vflip(img)

def keras(self, img):
Expand All @@ -171,7 +178,7 @@ def __init__(self):
def albumentations(self, img):
    """Rotate by -45 degrees via albumentations' functional interface."""
    return albumentations.rotate(img, angle=-45)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Rotate by -45 degrees with bilinear resampling, matching the albumentations variant's angle."""
    return torchvision.rotate(img, angle=-45, resample=Image.BILINEAR)

def keras(self, img):
Expand All @@ -187,7 +194,7 @@ def __init__(self):
def albumentations(self, img):
    """Brightness shift (beta=0.5, scaled by max value) via albumentations' functional interface."""
    return albumentations.brightness_contrast_adjust(img, beta=0.5, beta_by_max=True)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Brightness increase (factor 1.5) via torchvision's functional API."""
    return torchvision.adjust_brightness(img, brightness_factor=1.5)

def keras(self, img):
Expand All @@ -203,7 +210,7 @@ def __init__(self):
def albumentations(self, img):
    """Contrast increase (alpha=1.5) via albumentations' functional interface."""
    return albumentations.brightness_contrast_adjust(img, alpha=1.5)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Contrast increase (factor 1.5) via torchvision's functional API."""
    return torchvision.adjust_contrast(img, contrast_factor=1.5)


Expand All @@ -227,15 +234,15 @@ def __init__(self):
def albumentations(self, img):
    """Combined brightness+contrast adjustment in a single albumentations call (alpha=contrast, beta=brightness)."""
    return albumentations.brightness_contrast_adjust(img, alpha=1.5, beta=0.5, beta_by_max=True)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Brightness (x1.5) then contrast (x1.5), applied as two sequential torchvision calls.

    Unlike the albumentations variant, torchvision has no fused
    brightness+contrast operation, so two passes are required.
    """
    brightened = torchvision.adjust_brightness(img, brightness_factor=1.5)
    return torchvision.adjust_contrast(brightened, contrast_factor=1.5)

def augmentor(self, img):
for operation in self.augmentor_pipeline.operations:
img, = operation.perform_operation([img])
return img
return np.array(img, np.uint8, copy=True)


class ShiftScaleRotate(BenchmarkTest):
Expand All @@ -247,7 +254,7 @@ def __init__(self):
def albumentations(self, img):
    """Combined shift/scale/rotate (rotate -45°, scale 2x, shift 20% in x and y) via albumentations' functional interface."""
    return albumentations.shift_scale_rotate(img, angle=-45, scale=2, dx=0.2, dy=0.2)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Affine shift/scale/rotate via torchvision.

    NOTE(review): the angle sign (+45 here vs -45 in the albumentations
    variant) and the pixel translate (50, 50) vs fractional (0.2, 0.2)
    differ in convention between the two libraries — presumably chosen to
    produce an equivalent transform; confirm before comparing outputs.
    """
    return torchvision.affine(img, angle=45, translate=(50, 50), scale=2, shear=0, resample=Image.BILINEAR)

def keras(self, img):
Expand All @@ -262,7 +269,7 @@ def __init__(self):
def albumentations(self, img):
    """HSV shift (hue +20, saturation +20, value +20) via albumentations' functional interface."""
    return albumentations.shift_hsv(img, hue_shift=20, sat_shift=20, val_shift=20)

def torchvision(self, img):
def torchvision_transform(self, img):
img = torchvision.adjust_hue(img, hue_factor=0.1)
img = torchvision.adjust_saturation(img, saturation_factor=1.2)
img = torchvision.adjust_brightness(img, brightness_factor=1.2)
Expand Down Expand Up @@ -301,17 +308,17 @@ def __init__(self):
def albumentations(self, img):
    """Fixed 64x64 crop from the top-left corner (h_start=w_start=0 makes the 'random' crop deterministic for benchmarking)."""
    return albumentations.random_crop(img, crop_height=64, crop_width=64, h_start=0, w_start=0)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Fixed 64x64 crop from the top-left corner via torchvision, mirroring the albumentations variant."""
    return torchvision.crop(img, i=0, j=0, h=64, w=64)


class RandomSizedCrop_64_512(BenchmarkTest):
def __init__(self):

self.augmentor_op = [
Operations.Crop(probability=1, width=64, height=64, centre=False),
Operations.Resize(probability=1, width=512, height=512, resample_filter="BILINEAR"),
]
self.augmentor_pipeline = Pipeline()
self.augmentor_pipeline.add_operation(Operations.Crop(probability=1, width=64, height=64, centre=False))
self.augmentor_pipeline.add_operation(
Operations.Resize(probability=1, width=512, height=512, resample_filter="BILINEAR")
)
self.imgaug_transform = iaa.Sequential(
[iaa.CropToFixedSize(width=64, height=64), iaa.Scale(size=512, interpolation="linear")]
)
Expand All @@ -324,10 +331,11 @@ def albumentations(self, img):
return albumentations.resize(img, height=512, width=512)

def augmentor(self, img):
img = self.augmentor_op[0].perform_operation([img])[0]
return self.augmentor_op[1].perform_operation([img])
for operation in self.augmentor_pipeline.operations:
img, = operation.perform_operation([img])
return np.array(img, np.uint8, copy=True)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Crop a 64x64 patch from the top-left corner, then resize it to 512x512."""
    cropped = torchvision.crop(img, i=0, j=0, h=64, w=64)
    return torchvision.resize(cropped, (512, 512))

Expand All @@ -350,7 +358,7 @@ def __init__(self):
def albumentations(self, img):
    """Pad the image to at least 512x512 via albumentations' functional interface."""
    return albumentations.pad(img, min_height=512, min_width=512)

def torchvision(self, img):
def torchvision_transform(self, img):
if img.size[0] < 512:
img = torchvision.pad(img, (int((1 + 512 - img.size[0]) / 2), 0), padding_mode="reflect")
if img.size[1] < 512:
Expand All @@ -367,7 +375,7 @@ def __init__(self):
def albumentations(self, img):
    """Resize to 512x512 via albumentations' functional interface."""
    return albumentations.resize(img, height=512, width=512)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Resize to 512x512 via torchvision's functional API."""
    return torchvision.resize(img, (512, 512))


Expand All @@ -378,7 +386,7 @@ def __init__(self):
def albumentations(self, img):
    """Gamma correction (gamma=0.5) via albumentations' functional interface."""
    return albumentations.gamma_transform(img, gamma=0.5)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Gamma correction (gamma=0.5) via torchvision's functional API."""
    return torchvision.adjust_gamma(img, gamma=0.5)


Expand All @@ -391,9 +399,19 @@ def __init__(self):
def albumentations(self, img):
    """Grayscale conversion via albumentations' functional interface."""
    return albumentations.to_gray(img)

def torchvision(self, img):
def torchvision_transform(self, img):
    """Grayscale conversion via torchvision; num_output_channels=3 keeps a 3-channel image so outputs are comparable across libraries."""
    return torchvision.to_grayscale(img, num_output_channels=3)

def solt(self, img):
    """Grayscale conversion via this benchmark's solt stream.

    solt returns a single-channel image, so the result is expanded back to
    3-channel RGB with OpenCV to match the other libraries' output shape.
    """
    dc = sld.DataContainer(img, "I")
    dc = self.solt_stream(dc)
    return cv2.cvtColor(dc.data[0], cv2.COLOR_GRAY2RGB)

def augmentor(self, img):
    """Grayscale conversion via the Augmentor operation.

    The Pillow result is copied into a uint8 numpy array (copy=True so the
    conversion cost is measured consistently across libraries), then the
    single channel is stacked depth-wise into a 3-channel image to match
    the other libraries' output shape.
    """
    img = self.augmentor_op.perform_operation([img])[0]
    img = np.array(img, np.uint8, copy=True)
    return np.dstack([img, img, img])


class Posterize(BenchmarkTest):
def albumentations(self, img):
Expand Down

0 comments on commit ff1a17f

Please sign in to comment.