Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

SEND JOB TO COMFY UI #1347

Open
enzyme69 opened this issue Dec 3, 2023 · 1 comment
Open

SEND JOB TO COMFY UI #1347

enzyme69 opened this issue Dec 3, 2023 · 1 comment

Comments

@enzyme69
Copy link
Owner

enzyme69 commented Dec 3, 2023

Installed ComfyUI using the Pinokio AI Browser, running it as a server backend for AI generative work.

I am using a Mac Mini M2 Pro with 32 GB. It seems a good base model — not the fastest, but silent and speedy enough. Make sure you have 2 TB of storage or more.

# SDXL Turbo model is used, but LoRA LCM was also fast

import bpy
import json
import requests
import random

# Base URL of the local ComfyUI HTTP API (default ComfyUI port is 8188).
comfyui_address = "http://127.0.0.1:8188"

def queue_prompt(prompt_workflow):
    """Send a workflow to the ComfyUI server's /prompt endpoint.

    Parameters:
        prompt_workflow: dict describing the ComfyUI node graph (API format).

    Prints the outcome; never raises, so a dead server does not abort the
    calling Blender script.
    """
    payload = {"prompt": prompt_workflow}

    # Use the `json=` keyword so requests handles encoding AND sets the
    # Content-Type: application/json header (the original sent raw bytes
    # with no header). A timeout prevents Blender from hanging forever if
    # the ComfyUI server is unreachable.
    try:
        response = requests.post(
            comfyui_address + "/prompt", json=payload, timeout=30
        )
    except requests.exceptions.RequestException as exc:
        print("Error contacting ComfyUI server:", exc)
        return

    # Report HTTP-level failures without raising.
    if response.status_code != 200:
        print("Error sending prompt to ComfyUI:", response.status_code)
        print(response.text)
    else:
        print("Prompt successfully sent to ComfyUI")

# Read the workflow API data from file and convert it into a dictionary.
# A context manager closes the file handle promptly; the original
# json.load(open(...)) leaked the handle until garbage collection.
workflow_path = "/Users/jimmygunawan/Downloads/sdxl_turbo_api.json"
with open(workflow_path, encoding="utf-8") as workflow_file:
    prompt_workflow = json.load(workflow_file)

# Collect one prompt per line of a Blender text datablock.
prompt_list = []

# Name of the Blender text datablock holding the prompts. The original code
# indexed bpy.data.texts['MonkeyRobot'] directly, which raises KeyError when
# the block is missing — so its `if text_datablock:` guard could never run —
# and its error branch referenced the undefined name `text_block_name`
# (NameError). Using .get() makes the missing-block path actually reachable.
text_block_name = 'MonkeyRobot'
text_datablock = bpy.data.texts.get(text_block_name)

if text_datablock is not None:
    # Each line of the text block becomes one prompt.
    for line in text_datablock.lines:
        print(line.body)
        prompt_list.append(line.body)
else:
    print(f"Text block '{text_block_name}' not found.")

#prompt_list = []
#prompt_list.append("photo of a bearded man Mario wearing a red hat sitting alone in a cafe")
#prompt_list.append("oil painting of a llama standing in the middle of a busy street")
#prompt_list.append("water colour art of a fish sitting in a tree")
#prompt_list.append("beautiful mushroom glass bottle landscape, purple galaxy bottle")
#prompt_list.append("princess peach on abandoned castle reading book at night, fireflies")
#prompt_list.append("head totem of monkey head, made of mecha garbage recycle, in the middle of empty park")

# Give easy-to-remember names to the workflow nodes mutated per prompt.
prompt_pos_node = prompt_workflow["2"]  # positive CLIPTextEncode node
seed_node = prompt_workflow["4"]        # SamplerCustom node (holds noise_seed)

# Style suffix appended to every prompt before queueing.
PROMPT_SUFFIX = " made of garbage, waste recycles, wabisabi, imperfect"

# For every prompt in prompt_list...
for prompt in prompt_list:
    # Set the text prompt for the positive CLIPTextEncode node.
    prompt_pos_node["inputs"]["text"] = prompt + PROMPT_SUFFIX

    # Randomize the seed so each queued job produces a fresh image.
    # NOTE(review): the upper bound is 2**64 - 2; presumably 2**64 - 1
    # (the uint64 max) was intended, but the difference is inconsequential,
    # so the original bound is kept.
    seed_node["inputs"]["noise_seed"] = random.randint(1, 18446744073709551614)

    # Everything set — add the entire workflow to the ComfyUI queue.
    queue_prompt(prompt_workflow)
@enzyme69
Copy link
Owner Author

enzyme69 commented Dec 3, 2023

sdxl_turbo_api.json.zip

{
"1": {
"inputs": {
"ckpt_name": "sd_xl_turbo_1.0_fp16.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"2": {
"inputs": {
"text": "oil painting style of ",
"clip": [
"1",
1
]
},
"class_type": "CLIPTextEncode"
},
"3": {
"inputs": {
"text": "",
"clip": [
"1",
1
]
},
"class_type": "CLIPTextEncode"
},
"4": {
"inputs": {
"add_noise": true,
"noise_seed": 340269611110808,
"cfg": 1,
"model": [
"1",
0
],
"positive": [
"2",
0
],
"negative": [
"3",
0
],
"sampler": [
"13",
0
],
"sigmas": [
"9",
0
],
"latent_image": [
"10",
0
]
},
"class_type": "SamplerCustom"
},
"9": {
"inputs": {
"steps": 1,
"model": [
"1",
0
]
},
"class_type": "SDTurboScheduler"
},
"10": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"11": {
"inputs": {
"samples": [
"4",
0
],
"vae": [
"1",
2
]
},
"class_type": "VAEDecode"
},
"12": {
"inputs": {
"images": [
"11",
0
]
},
"class_type": "PreviewImage"
},
"13": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "KSamplerSelect"
},
"19": {
"inputs": {
"pixels": [
"21",
0
],
"vae": [
"1",
2
]
},
"class_type": "VAEEncode"
},
"21": {
"inputs": {
"upscale_method": "nearest-exact",
"scale_by": 2,
"image": [
"11",
0
]
},
"class_type": "ImageScaleBy"
},
"25": {
"inputs": {
"samples": [
"27",
0
],
"vae": [
"1",
2
]
},
"class_type": "VAEDecode"
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage"
},
"27": {
"inputs": {
"seed": 340269611110554,
"steps": 2,
"cfg": 1,
"sampler_name": "ddim",
"scheduler": "ddim_uniform",
"denoise": 1,
"model": [
"1",
0
],
"positive": [
"2",
0
],
"negative": [
"3",
0
],
"latent_image": [
"19",
0
]
},
"class_type": "KSampler"
},
"28": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"25",
0
]
},
"class_type": "SaveImage"
},
"29": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"11",
0
]
},
"class_type": "SaveImage"
}
}

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant