From 7104204dadf64333e81050657266178e6179521c Mon Sep 17 00:00:00 2001
From: henryruhs
Date: Wed, 16 Aug 2023 13:19:04 +0200
Subject: [PATCH] Move all choices to one file

---
 roop/choices.py                            | 10 ++++++++++
 roop/core.py                               | 23 +++++++++++-----------
 roop/processors/frame/core.py              |  2 +-
 roop/uis/__components__/face_analyser.py   |  9 +++++----
 roop/uis/__components__/face_selector.py   |  3 ++-
 roop/uis/__components__/output_settings.py |  4 ++--
 roop/uis/__components__/temp_frame.py      |  3 ++-
 roop/uis/core.py                           |  4 ++--
 8 files changed, 36 insertions(+), 22 deletions(-)
 create mode 100644 roop/choices.py

diff --git a/roop/choices.py b/roop/choices.py
new file mode 100644
index 000000000..71432d649
--- /dev/null
+++ b/roop/choices.py
@@ -0,0 +1,10 @@
+from typing import List
+
+from roop.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+
+face_recognition: List[FaceRecognition] = ['reference', 'many']
+face_analyser_direction: List[FaceAnalyserDirection] = ['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
+face_analyser_age: List[FaceAnalyserAge] = ['child', 'teen', 'adult', 'senior']
+face_analyser_gender: List[FaceAnalyserGender] = ['male', 'female']
+temp_frame_format: List[TempFrameFormat] = ['jpg', 'png']
+output_video_encoder: List[OutputVideoEncoder] = ['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
diff --git a/roop/core.py b/roop/core.py
index 0d92421b2..1181d0cfa 100755
--- a/roop/core.py
+++ b/roop/core.py
@@ -16,6 +16,7 @@
 import tensorflow
 
+import roop.choices
 import roop.globals
 import roop.metadata
 from roop.predictor import predict_image, predict_video
 
@@ -37,22 +38,22 @@ def parse_args() -> None:
     program.add_argument('--keep-fps', help='keep target fps', dest='keep_fps', action='store_true')
     program.add_argument('--keep-temp', help='keep temporary frames', dest='keep_temp', action='store_true')
     program.add_argument('--skip-audio', help='skip target audio', dest='skip_audio', action='store_true')
-    program.add_argument('--face-recognition', help='face recognition method', dest='face_recognition', default='reference', choices=['reference', 'many'])
-    program.add_argument('--face-analyser-direction', help='direction used for the face analyser', dest='face_analyser_direction', default='left-right', choices=['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'])
-    program.add_argument('--face-analyser-age', help='age used for the face analyser', dest='face_analyser_age', choices=['child', 'teen', 'adult', 'senior'])
-    program.add_argument('--face-analyser-gender', help='gender used for the face analyser', dest='face_analyser_gender', choices=['male', 'female'])
+    program.add_argument('--face-recognition', help='face recognition method', dest='face_recognition', default='reference', choices=roop.choices.face_recognition)
+    program.add_argument('--face-analyser-direction', help='direction used for the face analyser', dest='face_analyser_direction', default='left-right', choices=roop.choices.face_analyser_direction)
+    program.add_argument('--face-analyser-age', help='age used for the face analyser', dest='face_analyser_age', choices=roop.choices.face_analyser_age)
+    program.add_argument('--face-analyser-gender', help='gender used for the face analyser', dest='face_analyser_gender', choices=roop.choices.face_analyser_gender)
     program.add_argument('--reference-face-position', help='position of the reference face', dest='reference_face_position', type=int, default=0)
     program.add_argument('--reference-face-distance', help='distance between reference face and target face', dest='reference_face_distance', type=float, default=1.5)
     program.add_argument('--reference-frame-number', help='number of the reference frame', dest='reference_frame_number', type=int, default=0)
     program.add_argument('--trim-frame-start', help='start frame use for extraction', dest='trim_frame_start', type=int)
     program.add_argument('--trim-frame-end', help='end frame use for extraction', dest='trim_frame_end', type=int)
-    program.add_argument('--temp-frame-format', help='image format used for frame extraction', dest='temp_frame_format', default='jpg', choices=['jpg', 'png'])
+    program.add_argument('--temp-frame-format', help='image format used for frame extraction', dest='temp_frame_format', default='jpg', choices=roop.choices.temp_frame_format)
     program.add_argument('--temp-frame-quality', help='image quality used for frame extraction', dest='temp_frame_quality', type=int, default=100, choices=range(101), metavar='[0-100]')
-    program.add_argument('--output-video-encoder', help='encoder used for the output video', dest='output_video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'])
+    program.add_argument('--output-video-encoder', help='encoder used for the output video', dest='output_video_encoder', default='libx264', choices=roop.choices.output_video_encoder)
     program.add_argument('--output-video-quality', help='quality used for the output video', dest='output_video_quality', type=int, default=90, choices=range(101), metavar='[0-100]')
     program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int)
-    program.add_argument('--execution-providers', help='list of available execution providers (choices: cpu, ...)', dest='execution_providers', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
-    program.add_argument('--execution-thread-count', help='number of execution threads', dest='execution_thread_count', type=int, default=suggest_execution_thread_count())
+    program.add_argument('--execution-providers', help='list of available execution providers (choices: cpu, ...)', dest='execution_providers', default=['cpu'], choices=suggest_execution_providers_choices(), nargs='+')
+    program.add_argument('--execution-thread-count', help='number of execution threads', dest='execution_thread_count', type=int, default=suggest_execution_thread_count_default())
     program.add_argument('--execution-queue-count', help='number of execution queries', dest='execution_queue_count', type=int, default=1)
     program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
@@ -86,15 +87,15 @@ def parse_args() -> None:
     roop.globals.execution_queue_count = args.execution_queue_count
 
 
-def suggest_execution_providers() -> List[str]:
+def suggest_execution_providers_choices() -> List[str]:
     return encode_execution_providers(onnxruntime.get_available_providers())
 
 
-def suggest_ui_layouts() -> List[str]:
+def suggest_ui_layouts_choices() -> List[str]:
     return list_module_names('roop/uis/__layouts__')
 
 
-def suggest_execution_thread_count() -> int:
+def suggest_execution_thread_count_default() -> int:
     if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
         return 8
     return 1
diff --git a/roop/processors/frame/core.py b/roop/processors/frame/core.py
index b73ea21d3..07a41725e 100644
--- a/roop/processors/frame/core.py
+++ b/roop/processors/frame/core.py
@@ -32,7 +32,7 @@ def load_frame_processor_module(frame_processor: str) -> Any:
         if not hasattr(frame_processor_module, method_name):
             raise NotImplementedError
     except ModuleNotFoundError:
-        sys.exit(f'Frame processor {frame_processor} could be not loaded.')
+        sys.exit(f'Frame processor {frame_processor} could not be loaded.')
     except NotImplementedError:
         sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
     return frame_processor_module
diff --git a/roop/uis/__components__/face_analyser.py b/roop/uis/__components__/face_analyser.py
index dc5214189..15a16b8f3 100644
--- a/roop/uis/__components__/face_analyser.py
+++ b/roop/uis/__components__/face_analyser.py
@@ -2,6 +2,7 @@
 
 import gradio
 
+import roop.choices
 import roop.globals
 from roop.uis import core as ui
 from roop.uis.typing import Update
@@ -20,17 +21,17 @@ def render() -> None:
     with gradio.Row():
         FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
             label='FACE ANALYSER DIRECTION',
-            choices=['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'],
-            value=roop.globals.face_analyser_direction or 'none'
+            choices=roop.choices.face_analyser_direction,
+            value=roop.globals.face_analyser_direction
         )
         FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
             label='FACE ANALYSER AGE',
-            choices=['none', 'child', 'teen', 'adult', 'senior'],
+            choices=['none'] + roop.choices.face_analyser_age,
             value=roop.globals.face_analyser_age or 'none'
         )
         FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
             label='FACE ANALYSER GENDER',
-            choices=['none', 'male', 'female'],
+            choices=['none'] + roop.choices.face_analyser_gender,
             value=roop.globals.face_analyser_gender or 'none'
         )
     ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
diff --git a/roop/uis/__components__/face_selector.py b/roop/uis/__components__/face_selector.py
index e072d85fe..f08ccfc4f 100644
--- a/roop/uis/__components__/face_selector.py
+++ b/roop/uis/__components__/face_selector.py
@@ -4,6 +4,7 @@
 import cv2
 import gradio
 
+import roop.choices
 import roop.globals
 from roop.capturer import get_video_frame
 from roop.face_analyser import get_many_faces
@@ -40,7 +41,7 @@ def render() -> None:
             reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
         FACE_RECOGNITION_DROPDOWN = gradio.Dropdown(
             label='FACE RECOGNITION',
-            choices=['reference', 'many'],
+            choices=roop.choices.face_recognition,
             value=roop.globals.face_recognition
         )
         REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
diff --git a/roop/uis/__components__/output_settings.py b/roop/uis/__components__/output_settings.py
index abc42a77d..fc44f7423 100644
--- a/roop/uis/__components__/output_settings.py
+++ b/roop/uis/__components__/output_settings.py
@@ -1,9 +1,9 @@
 from typing import Optional
 
 import gradio
+import roop.choices
 import roop.globals
 from roop.typing import OutputVideoEncoder
-
 from roop.uis.typing import Update
 
 OUTPUT_VIDEO_ENCODER_DROPDOWN: Optional[gradio.Dropdown] = None
@@ -17,7 +17,7 @@ def render() -> None:
     with gradio.Box():
         OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
             label='OUTPUT VIDEO ENCODER',
-            choices=['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'],
+            choices=roop.choices.output_video_encoder,
             value=roop.globals.output_video_encoder
         )
         OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
diff --git a/roop/uis/__components__/temp_frame.py b/roop/uis/__components__/temp_frame.py
index d3933e8ab..39902ca7b 100644
--- a/roop/uis/__components__/temp_frame.py
+++ b/roop/uis/__components__/temp_frame.py
@@ -1,6 +1,7 @@
 from typing import Optional
 
 import gradio
+import roop.choices
 import roop.globals
 from roop.typing import TempFrameFormat
 
@@ -17,7 +18,7 @@ def render() -> None:
     with gradio.Box():
         TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
             label='TEMP FRAME FORMAT',
-            choices=['jpg', 'png'],
+            choices=roop.choices.temp_frame_format,
             value=roop.globals.temp_frame_format
         )
         TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
diff --git a/roop/uis/core.py b/roop/uis/core.py
index bc43b27fd..92a12d060 100644
--- a/roop/uis/core.py
+++ b/roop/uis/core.py
@@ -19,7 +19,7 @@
 
 
 def init() -> None:
-    with gradio.Blocks(theme=get_theme(), title=roop.metadata.name + ' ' + roop.metadata.version) as ui:
+    with gradio.Blocks(theme=get_theme(), title=f'{roop.metadata.name} {roop.metadata.version}') as ui:
         for ui_layout in roop.globals.ui_layouts:
            ui_layout_module = load_ui_layout_module(ui_layout)
             ui_layout_module.render()
@@ -34,7 +34,7 @@ def load_ui_layout_module(ui_layout: str) -> Any:
         if not hasattr(ui_layout_module, method_name):
             raise NotImplementedError
     except ModuleNotFoundError:
-        sys.exit(f'UI layout {ui_layout} could be not loaded.')
+        sys.exit(f'UI layout {ui_layout} could not be loaded.')
     except NotImplementedError:
         sys.exit(f'UI layout {ui_layout} not implemented correctly.')
     return ui_layout_module
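
Reviewer note: the effect of this patch is that argparse and the Gradio dropdowns now read from a single source of truth, so the CLI and UI option lists can no longer drift apart. A minimal, self-contained sketch of the pattern (hypothetical stand-in names, not the actual roop modules):

    from typing import List
    import argparse

    # Stand-in for roop/choices.py: one module owns every option list.
    face_recognition: List[str] = ['reference', 'many']

    def parse_args(argv: List[str]) -> argparse.Namespace:
        # CLI side: argparse validates input against the shared list.
        program = argparse.ArgumentParser()
        program.add_argument('--face-recognition', default='reference', choices=face_recognition)
        return program.parse_args(argv)

    def dropdown_choices(optional: bool = False) -> List[str]:
        # UI side: the same list, optionally prefixed with a 'none' sentinel,
        # mirroring the ['none'] + roop.choices.* pattern in face_analyser.py.
        return (['none'] if optional else []) + face_recognition

    if __name__ == '__main__':
        print(parse_args(['--face-recognition', 'many']).face_recognition)  # many
        print(dropdown_choices(optional=True))  # ['none', 'reference', 'many']

Note that ['none'] + roop.choices.face_analyser_age builds a fresh list on each call, so the shared module-level lists are never mutated by the UI.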