From e0588bfd14aead70ffdeb01b18f1db3ab23d33e8 Mon Sep 17 00:00:00 2001 From: wxywb Date: Tue, 8 Feb 2022 15:17:59 +0800 Subject: [PATCH] init retinaface mobilenet. --- __init__.py | 13 ++++ pytorch/__init__.py | 14 ++++ pytorch/model.py | 41 +++++++++++ ...pytorch_retinaface_mobilenet_widerface.pth | 3 + requirements.txt | 1 + retinaface_face_detection.py | 73 +++++++++++++++++++ 6 files changed, 145 insertions(+) create mode 100644 __init__.py create mode 100644 pytorch/__init__.py create mode 100644 pytorch/model.py create mode 100644 pytorch/pytorch_retinaface_mobilenet_widerface.pth create mode 100644 requirements.txt create mode 100644 retinaface_face_detection.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..348f360 --- /dev/null +++ b/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/pytorch/__init__.py b/pytorch/__init__.py new file mode 100644 index 0000000..13f6d66 --- /dev/null +++ b/pytorch/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/pytorch/model.py b/pytorch/model.py new file mode 100644 index 0000000..4921173 --- /dev/null +++ b/pytorch/model.py @@ -0,0 +1,41 @@ +# Copyright 2021 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class Model:
    """RetinaFace face-detection model wrapper (MobileNet-0.25 backbone).

    Builds the towhee RetinaFace network from the ``cfg_mnet`` config,
    loads the pretrained WIDER FACE weights shipped alongside this file,
    and exposes inference via ``__call__``.
    """

    def __init__(self) -> None:
        model_name = 'cfg_mnet'  # MobileNet-0.25 backbone configuration
        cfg = build_configs(model_name)
        # phase='test' disables training-only behavior in the network
        self._model = RetinaFace(cfg=cfg, phase='test')
        # os.path.join is portable and safer than '+'-concatenating paths
        weights_path = os.path.join(
            os.path.dirname(__file__),
            'pytorch_retinaface_mobilenet_widerface.pth')
        load_pretrained_weights(self._model, 'mnet', None, weights_path)
        self._model.eval()

    def __call__(self, img_tensor: torch.Tensor):
        """Run face detection on ``img_tensor``.

        Args:
            img_tensor (`torch.Tensor`): input image tensor
                (presumably HWC float — TODO confirm against caller).

        Returns:
            Whatever ``RetinaFace.inference`` yields; callers unpack it
            as ``(bboxes, keypoints)``.
        """
        # Pure inference: no_grad avoids building the autograd graph,
        # saving memory and time.
        with torch.no_grad():
            outputs = self._model.inference(img_tensor)
        return outputs

    def train(self):
        """Training is not supported for this pretrained operator."""
        pass
class RetinafaceFaceDetection(Operator):
    """Face detection operator based on RetinaFace (MobileNet backbone).

    Detects faces in an image and returns, per face, the bounding box,
    facial keypoints, and (optionally) the cropped face image.

    Args:
        need_crop (`bool`):
            If True, each output also carries the cropped face region.
        framework (`str`):
            Deep-learning framework; only 'pytorch' is supported.

    Raises:
        ValueError: if ``framework`` is not 'pytorch' (previously the
            instance was left half-constructed and failed later with an
            opaque AttributeError).
    """

    def __init__(self, need_crop=True, framework: str = 'pytorch') -> None:
        super().__init__()
        if framework != 'pytorch':
            raise ValueError(f"unsupported framework: {framework!r}, expected 'pytorch'")
        import importlib.util
        # Load the sibling pytorch/model.py dynamically so the operator
        # stays self-contained when installed as a towhee operator.
        path = os.path.join(str(Path(__file__).parent), 'pytorch', 'model.py')
        opname = os.path.basename(str(Path(__file__))).split('.')[0]
        spec = importlib.util.spec_from_file_location(opname, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        self.need_crop = need_crop
        self.model = module.Model()

    def __call__(self, image: 'towhee.types.Image') -> List[NamedTuple('Outputs', [('boxes', numpy.ndarray),
                                                                                   ('keypoints', numpy.ndarray),
                                                                                   ('cropped_imgs', numpy.ndarray)])]:
        """Detect faces in ``image``.

        Returns one Outputs tuple per detected face. ``cropped_imgs`` is
        None for every face when ``need_crop`` is False.
        """
        Outputs = NamedTuple('Outputs', [('boxes', numpy.ndarray),
                                         ('keypoints', numpy.ndarray),
                                         ('cropped_imgs', numpy.ndarray)])
        img = torch.FloatTensor(numpy.asarray(to_pil(image)))
        bboxes, keypoints = self.model(img)

        if self.need_crop:
            h, w, _ = img.shape
            croppeds = []
            for bbox in bboxes:
                x1, y1, x2, y2, _ = bbox
                # Clamp box to image bounds before slicing.
                x1 = max(int(x1), 0)
                y1 = max(int(y1), 0)
                x2 = min(int(x2), w)
                y2 = min(int(y2), h)
                croppeds.append(img[y1:y2, x1:x2, :].numpy())
        else:
            # BUG FIX: previously croppeds stayed empty when need_crop was
            # False, so the loop below produced no outputs and every
            # detection was silently dropped. Emit a None placeholder per
            # detection instead.
            croppeds = [None] * len(bboxes)

        return [Outputs(bbox, kps, cropped)
                for bbox, kps, cropped in zip(bboxes, keypoints, croppeds)]