towhee / transform-image
8 changed files with 176 additions and 2 deletions
@@ -0,0 +1,35 @@
# .gitattributes

# Source files
# ============
*.pxd text diff=python
*.py text diff=python
*.py3 text diff=python
*.pyw text diff=python
*.pyx text diff=python
*.pyz text diff=python
*.pyi text diff=python

# Binary files
# ============
*.db binary
*.p binary
*.pkl binary
*.pickle binary
*.pyc binary export-ignore
*.pyo binary export-ignore
*.pyd binary

# Jupyter notebook
*.ipynb text

# Model files
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tar.gz filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -1,3 +1,20 @@
# transform-image
# Transform Image Operator

This is another test repo
Authors: name or github-name(email)

## Overview

Introduce the functions of op and the model used.

## Interface

The interface of all the functions in op. (input & output)

## How to use

- Requirements from requirements.txt
- How it works in some typical pipelines and the yaml example.

## Reference

Model link.
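A minimal usage sketch for the "How to use" section above, assuming the file layout from this diff (`transform_image.py` importable from the working directory and the test image at `./test_data/test.jpg`, as in the unit test further down):

```python
# Minimal usage sketch (assumes transform_image.py is on the import path and
# ./test_data/test.jpg exists, matching the unit test in this changeset).
from transform_image import TransformImage

op = TransformImage(256)              # shorter edge -> 256, 224x224 center crop, normalize
outputs = op('./test_data/test.jpg')  # a file path is one of the accepted input types
print(outputs.img_transformed.shape)  # expected: torch.Size([1, 3, 224, 224])
```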
@@ -0,0 +1,13 @@
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -0,0 +1,4 @@
torch>=1.2.0
torchvision>=0.4.0
numpy>=1.19.5
pillow>=8.3.1
New binary image file added (178 KiB).
@@ -0,0 +1,29 @@
import unittest
from PIL import Image
from torchvision import transforms
from transform_image import TransformImage


class TestTransformImage(unittest.TestCase):
    test_img = './test_data/test.jpg'
    test_img = Image.open(test_img)
    tfms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    img1 = tfms(test_img).unsqueeze(0)

    def test_transform_image(self):
        op = TransformImage(256)
        outputs = op(self.test_img)
        print("The output type of operator:", type(outputs.img_transformed))
        c = (self.img1.numpy() == outputs.img_transformed.numpy())
        self.assertEqual(c.all(), True)


if __name__ == '__main__':
    unittest.main()
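The test above checks exact element-wise equality, which works because both tensors come from the same deterministic transform on the same image. When floating-point drift is possible (e.g. different devices or library versions), a tolerance-based comparison is a common alternative; a sketch, not part of the original test:

```python
import torch

def tensors_match(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-6) -> bool:
    """Tolerance-based alternative to exact element-wise equality."""
    return torch.allclose(a, b, atol=atol)
```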
@@ -0,0 +1,63 @@
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import NamedTuple, Union

import torch
import numpy as np
from PIL import Image
from torchvision import transforms

from towhee.operator import Operator


class TransformImage(Operator):
    """
    Use PyTorch to transform an image (resize, crop, normalize, etc.).

    Args:
        size (`int`):
            Target image size. The shorter edge is resized to `size`, then a
            224x224 center crop and normalization are applied.
    """
    def __init__(self, size: int) -> None:
        super().__init__()
        # user defined transform
        self.tfms = transforms.Compose(
            [
                transforms.Resize(size),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )

    def __call__(self, img_tensor: Union[np.ndarray, Image.Image, torch.Tensor, str]) -> NamedTuple('Outputs', [('img_transformed', torch.Tensor)]):
        """
        Apply the transform to an image.

        Args:
            img_tensor (`Union[np.ndarray, Image.Image, torch.Tensor, str]`):
                The image data to be transformed; one of np.ndarray,
                Image.Image, torch.Tensor, or str (a file path).

        Returns:
            (`torch.Tensor`)
                The transformed and normalized image tensor.
        """
        if isinstance(img_tensor, str):
            img_tensor = Image.open(img_tensor)
        if isinstance(img_tensor, Image.Image):
            img_tensor = img_tensor.convert('RGB')
        Outputs = NamedTuple('Outputs', [('img_transformed', torch.Tensor)])
        return Outputs(self.tfms(img_tensor).unsqueeze(0))
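For readers unfamiliar with `transforms.Compose`, the pipeline built in `__init__` amounts to the following step-by-step sequence, written with torchvision's functional API. This is a sketch assuming `size=256` and the repo's test image; equivalence to the composed version is expected but not asserted here:

```python
import torchvision.transforms.functional as F
from PIL import Image

img = Image.open('./test_data/test.jpg').convert('RGB')

x = F.resize(img, 256)          # shorter edge resized to 256 px, aspect ratio kept
x = F.center_crop(x, 224)       # 224 x 224 crop from the center
x = F.to_tensor(x)              # HWC uint8 in [0, 255] -> CHW float in [0.0, 1.0]
x = F.normalize(x, mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225])  # per-channel (x - mean) / std
x = x.unsqueeze(0)              # add a batch dimension: shape (1, 3, 224, 224)
```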
@@ -0,0 +1,13 @@
name: 'transform-image'
labels:
  recommended_framework: pytorch1.2.0
  class: image-transform
  others:
operator: 'towhee/transform-image'
init:
  size: int
call:
  input:
    img_tensor: Union[np.ndarray, Image.Image, torch.Tensor, str]
  output:
    img_transformed: torch.Tensor