Move all choices to one file
henryruhs committed Aug 16, 2023
1 parent d5886d1 commit 7104204
Showing 8 changed files with 36 additions and 22 deletions.
10 changes: 10 additions & 0 deletions roop/choices.py
@@ -0,0 +1,10 @@
from typing import List

from roop.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder

face_recognition: List[FaceRecognition] = ['reference', 'many']
face_analyser_direction: List[FaceAnalyserDirection] = ['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
face_analyser_age: List[FaceAnalyserAge] = ['child', 'teen', 'adult', 'senior']
face_analyser_gender: List[FaceAnalyserGender] = ['male', 'female']
temp_frame_format: List[TempFrameFormat] = ['jpg', 'png']
output_video_encoder: List[OutputVideoEncoder] = ['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
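
Note: the List annotations above rely on Literal aliases imported from roop.typing, which this commit does not touch. A minimal sketch of what those aliases presumably look like, so the pairing is visible (the exact definitions are an assumption, not part of this diff):

from typing import Literal

FaceRecognition = Literal['reference', 'many']
FaceAnalyserDirection = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
FaceAnalyserGender = Literal['male', 'female']
TempFrameFormat = Literal['jpg', 'png']
OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']

Keeping each runtime list next to a Literal alias means a type checker can flag a value that is added to one but not the other.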
23 changes: 12 additions & 11 deletions roop/core.py
@@ -16,6 +16,7 @@
import tensorflow


import roop.choices
import roop.globals
import roop.metadata
from roop.predictor import predict_image, predict_video
@@ -37,22 +38,22 @@ def parse_args() -> None:
program.add_argument('--keep-fps', help='keep target fps', dest='keep_fps', action='store_true')
program.add_argument('--keep-temp', help='keep temporary frames', dest='keep_temp', action='store_true')
program.add_argument('--skip-audio', help='skip target audio', dest='skip_audio', action='store_true')
program.add_argument('--face-recognition', help='face recognition method', dest='face_recognition', default='reference', choices=['reference', 'many'])
program.add_argument('--face-analyser-direction', help='direction used for the face analyser', dest='face_analyser_direction', default='left-right', choices=['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'])
program.add_argument('--face-analyser-age', help='age used for the face analyser', dest='face_analyser_age', choices=['child', 'teen', 'adult', 'senior'])
program.add_argument('--face-analyser-gender', help='gender used for the face analyser', dest='face_analyser_gender', choices=['male', 'female'])
program.add_argument('--face-recognition', help='face recognition method', dest='face_recognition', default='reference', choices=roop.choices.face_recognition)
program.add_argument('--face-analyser-direction', help='direction used for the face analyser', dest='face_analyser_direction', default='left-right', choices=roop.choices.face_analyser_direction)
program.add_argument('--face-analyser-age', help='age used for the face analyser', dest='face_analyser_age', choices=roop.choices.face_analyser_age)
program.add_argument('--face-analyser-gender', help='gender used for the face analyser', dest='face_analyser_gender', choices=roop.choices.face_analyser_gender)
program.add_argument('--reference-face-position', help='position of the reference face', dest='reference_face_position', type=int, default=0)
program.add_argument('--reference-face-distance', help='distance between reference face and target face', dest='reference_face_distance', type=float, default=1.5)
program.add_argument('--reference-frame-number', help='number of the reference frame', dest='reference_frame_number', type=int, default=0)
program.add_argument('--trim-frame-start', help='start frame use for extraction', dest='trim_frame_start', type=int)
program.add_argument('--trim-frame-end', help='end frame use for extraction', dest='trim_frame_end', type=int)
program.add_argument('--temp-frame-format', help='image format used for frame extraction', dest='temp_frame_format', default='jpg', choices=['jpg', 'png'])
program.add_argument('--temp-frame-format', help='image format used for frame extraction', dest='temp_frame_format', default='jpg', choices=roop.choices.temp_frame_format)
program.add_argument('--temp-frame-quality', help='image quality used for frame extraction', dest='temp_frame_quality', type=int, default=100, choices=range(101), metavar='[0-100]')
program.add_argument('--output-video-encoder', help='encoder used for the output video', dest='output_video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'])
program.add_argument('--output-video-encoder', help='encoder used for the output video', dest='output_video_encoder', default='libx264', choices=roop.choices.output_video_encoder)
program.add_argument('--output-video-quality', help='quality used for the output video', dest='output_video_quality', type=int, default=90, choices=range(101), metavar='[0-100]')
program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int)
program.add_argument('--execution-providers', help='list of available execution providers (choices: cpu, ...)', dest='execution_providers', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
program.add_argument('--execution-thread-count', help='number of execution threads', dest='execution_thread_count', type=int, default=suggest_execution_thread_count())
program.add_argument('--execution-providers', help='list of available execution providers (choices: cpu, ...)', dest='execution_providers', default=['cpu'], choices=suggest_execution_providers_choices(), nargs='+')
program.add_argument('--execution-thread-count', help='number of execution threads', dest='execution_thread_count', type=int, default=suggest_execution_thread_count_default())
program.add_argument('--execution-queue-count', help='number of execution queries', dest='execution_queue_count', type=int, default=1)
program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')

@@ -86,15 +87,15 @@ def parse_args() -> None:
roop.globals.execution_queue_count = args.execution_queue_count


def suggest_execution_providers() -> List[str]:
def suggest_execution_providers_choices() -> List[str]:
return encode_execution_providers(onnxruntime.get_available_providers())


def suggest_ui_layouts() -> List[str]:
def suggest_ui_layouts_choices() -> List[str]:
return list_module_names('roop/uis/__layouts__')


def suggest_execution_thread_count() -> int:
def suggest_execution_thread_count_default() -> int:
if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
return 8
return 1
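
For context, suggest_execution_providers_choices() above still delegates to encode_execution_providers(), which this hunk does not show. A plausible sketch of that encode/decode pair, assuming the short CLI names are produced by stripping the 'ExecutionProvider' suffix from the onnxruntime provider names:

# Sketch only; the real helpers live elsewhere in roop/core.py and may differ.
import onnxruntime
from typing import List

def encode_execution_providers(execution_providers: List[str]) -> List[str]:
    # e.g. 'CUDAExecutionProvider' -> 'cuda', 'CPUExecutionProvider' -> 'cpu'
    return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]

def decode_execution_providers(execution_providers: List[str]) -> List[str]:
    available_providers = onnxruntime.get_available_providers()
    return [provider for provider, encoded in zip(available_providers, encode_execution_providers(available_providers)) if encoded in execution_providers]

Under a mapping like this, a call such as --execution-providers cuda cpu resolves back to the full provider names before they reach onnxruntime.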
2 changes: 1 addition & 1 deletion roop/processors/frame/core.py
@@ -32,7 +32,7 @@ def load_frame_processor_module(frame_processor: str) -> Any:
if not hasattr(frame_processor_module, method_name):
raise NotImplementedError
except ModuleNotFoundError:
sys.exit(f'Frame processor {frame_processor} could be not loaded.')
sys.exit(f'Frame processor {frame_processor} could not be loaded.')
except NotImplementedError:
sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
return frame_processor_module
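
The loading step that precedes the hasattr check sits above the visible context. A rough sketch of the whole function, under the assumption that it resolves the module with importlib and checks a fixed method interface (the module path and method names are assumptions; only the error messages are confirmed by this diff):

# Assumed shape of the loader shown partially above.
import importlib
import sys
from typing import Any

FRAME_PROCESSOR_METHODS = ['pre_check', 'pre_start', 'process_frame', 'process_image', 'process_video']

def load_frame_processor_module(frame_processor: str) -> Any:
    try:
        frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
        for method_name in FRAME_PROCESSOR_METHODS:
            if not hasattr(frame_processor_module, method_name):
                raise NotImplementedError
    except ModuleNotFoundError:
        sys.exit(f'Frame processor {frame_processor} could not be loaded.')
    except NotImplementedError:
        sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
    return frame_processor_module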
9 changes: 5 additions & 4 deletions roop/uis/__components__/face_analyser.py
@@ -2,6 +2,7 @@

import gradio

import roop.choices
import roop.globals
from roop.uis import core as ui
from roop.uis.typing import Update
@@ -20,17 +21,17 @@ def render() -> None:
with gradio.Row():
FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
label='FACE ANALYSER DIRECTION',
choices=['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'],
value=roop.globals.face_analyser_direction or 'none'
choices=roop.choices.face_analyser_direction,
value=roop.globals.face_analyser_direction
)
FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
label='FACE ANALYSER AGE',
choices=['none', 'child', 'teen', 'adult', 'senior'],
choices=['none'] + roop.choices.face_analyser_age,
value=roop.globals.face_analyser_age or 'none'
)
FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
label='FACE ANALYSER GENDER',
choices=['none', 'male', 'female'],
choices=['none'] + roop.choices.face_analyser_gender,
value=roop.globals.face_analyser_gender or 'none'
)
ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
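
Note that the direction dropdown drops its previous fallback to 'none' because --face-analyser-direction always carries a default, while age and gender keep a 'none' entry prepended to the shared lists. Writing the selection back into roop.globals presumably happens in the component's listen step; a hypothetical handler showing how 'none' would map back to None (the name and wiring are assumptions):

# Hypothetical change handler; the actual wiring is outside this hunk.
import gradio

import roop.globals
from roop.uis.typing import Update

def update_face_analyser_age(face_analyser_age: str) -> Update:
    # 'none' in the UI corresponds to None in the shared globals.
    roop.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None
    return gradio.update(value=face_analyser_age)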
3 changes: 2 additions & 1 deletion roop/uis/__components__/face_selector.py
@@ -4,6 +4,7 @@
import cv2
import gradio

import roop.choices
import roop.globals
from roop.capturer import get_video_frame
from roop.face_analyser import get_many_faces
@@ -40,7 +41,7 @@ def render() -> None:
reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
FACE_RECOGNITION_DROPDOWN = gradio.Dropdown(
label='FACE RECOGNITION',
choices=['reference', 'many'],
choices=roop.choices.face_recognition,
value=roop.globals.face_recognition
)
REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
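
For orientation, the gallery value comes from extract_gallery_frames(), which is defined elsewhere in this file and only referenced here. A rough sketch, assuming it crops each face detected by get_many_faces out of the reference frame (the crop logic and attribute names are assumptions):

# Rough sketch of the gallery helper; not part of this diff.
from typing import Any, List

from roop.face_analyser import get_many_faces

def extract_gallery_frames(reference_frame: Any) -> List[Any]:
    crop_frames = []
    for face in get_many_faces(reference_frame):
        start_x, start_y, end_x, end_y = map(int, face.bbox)  # assumed insightface-style bounding box
        crop_frames.append(reference_frame[start_y:end_y, start_x:end_x])
    return crop_frames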
4 changes: 2 additions & 2 deletions roop/uis/__components__/output_settings.py
@@ -1,9 +1,9 @@
from typing import Optional
import gradio

import roop.choices
import roop.globals
from roop.typing import OutputVideoEncoder

from roop.uis.typing import Update

OUTPUT_VIDEO_ENCODER_DROPDOWN: Optional[gradio.Dropdown] = None
@@ -17,7 +17,7 @@ def render() -> None:
with gradio.Box():
OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
label='OUTPUT VIDEO ENCODER',
choices=['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'],
choices=roop.choices.output_video_encoder,
value=roop.globals.output_video_encoder
)
OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
3 changes: 2 additions & 1 deletion roop/uis/__components__/temp_frame.py
@@ -1,6 +1,7 @@
from typing import Optional
import gradio

import roop.choices
import roop.globals
from roop.typing import TempFrameFormat

@@ -17,7 +18,7 @@ def render() -> None:
with gradio.Box():
TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
label='TEMP FRAME FORMAT',
choices=['jpg', 'png'],
choices=roop.choices.temp_frame_format,
value=roop.globals.temp_frame_format
)
TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
4 changes: 2 additions & 2 deletions roop/uis/core.py
@@ -19,7 +19,7 @@


def init() -> None:
with gradio.Blocks(theme=get_theme(), title=roop.metadata.name + ' ' + roop.metadata.version) as ui:
with gradio.Blocks(theme=get_theme(), title=f'{roop.metadata.name} {roop.metadata.version}') as ui:
for ui_layout in roop.globals.ui_layouts:
ui_layout_module = load_ui_layout_module(ui_layout)
ui_layout_module.render()
@@ -34,7 +34,7 @@ def load_ui_layout_module(ui_layout: str) -> Any:
if not hasattr(ui_layout_module, method_name):
raise NotImplementedError
except ModuleNotFoundError:
sys.exit(f'UI layout {ui_layout} could be not loaded.')
sys.exit(f'UI layout {ui_layout} could not be loaded.')
except NotImplementedError:
sys.exit(f'UI layout {ui_layout} not implemented correctly.')
return ui_layout_module
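
load_ui_layout_module mirrors the frame-processor loader: resolve the layout module by name, then fail fast if it lacks the expected interface. The layout names offered on the CLI come from suggest_ui_layouts_choices() in roop/core.py, which scans roop/uis/__layouts__ via list_module_names; a plausible sketch of that utility (assumed implementation):

# Assumed implementation of the module-name listing used by suggest_ui_layouts_choices().
import os
from pathlib import Path
from typing import List, Optional

def list_module_names(path: str) -> Optional[List[str]]:
    if os.path.isdir(path):
        return [Path(file_name).stem for file_name in os.listdir(path) if not file_name.startswith('__')]
    return None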
