From e2baa565ad3ba1910778a87813e99b8dacce3c1a Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 9 Apr 2025 11:24:30 -0700 Subject: [PATCH 01/40] Add Ideogram generate node. --- comfy_extras/nodes_api.py | 283 ++++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 284 insertions(+) create mode 100644 comfy_extras/nodes_api.py diff --git a/comfy_extras/nodes_api.py b/comfy_extras/nodes_api.py new file mode 100644 index 00000000000..8ab57bb26c5 --- /dev/null +++ b/comfy_extras/nodes_api.py @@ -0,0 +1,283 @@ +from inspect import cleandoc +class IdeogramTextToImage: + """ + Generates images synchronously based on a given prompt and optional parameters. + + Images links are available for a limited period of time; if you would like to keep the image, you must download it. + """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + """ + Return a dictionary which contains config for all input fields. + Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT". + Input types "INT", "STRING" or "FLOAT" are special values for fields on the node. + The type can be a list for selection. + + Returns: `dict`: + - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required` + - Value input_fields (`dict`): Contains input fields config: + * Key field_name (`string`): Name of a entry-point method's argument + * Value field_config (`tuple`): + + First value is a string indicate the type of field or a list for selection. + + Secound value is a config for type "INT", "STRING" or "FLOAT". + """ + return { + "required": { + "prompt": ("STRING", {"multiline": True, + "default": "", "tooltip": "Prompt for the image generation"}), + "model": (["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"], {"default": "V_2"}), + }, + "optional": { + "aspect_ratio": (["ASPECT_1_1", "ASPECT_4_3", "ASPECT_3_4", "ASPECT_16_9", "ASPECT_9_16", + "ASPECT_2_1", "ASPECT_1_2", "ASPECT_3_2", "ASPECT_2_3", "ASPECT_4_5", "ASPECT_5_4"], { + "default": "ASPECT_1_1", + "tooltip": "The aspect ratio for image generation. Cannot be used with resolution" + }), + "resolution": (["1024x1024", "1024x1792", "1792x1024"], { + "default": "1024x1024", + "tooltip": "The resolution for image generation (V2 only). 
Cannot be used with aspect_ratio" + }), + "magic_prompt_option": (["AUTO", "ON", "OFF"], { + "default": "AUTO", + "tooltip": "Determine if MagicPrompt should be used in generation" + }), + "seed": ("INT", { + "default": 0, + "min": 0, + "max": 2147483647, + "step": 1, + "display": "number" + }), + "style_type": (["NONE", "ANIME", "CINEMATIC", "CREATIVE", "DIGITAL_ART", "PHOTOGRAPHIC"], { + "default": "NONE", + "tooltip": "Style type for generation (V2+ only)" + }), + "negative_prompt": ("STRING", { + "multiline": True, + "default": "", + "tooltip": "Description of what to exclude from the image (V1/V2 only)" + }), + "num_images": ("INT", { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number" + }), + "color_palette": ("STRING", { + "multiline": False, + "default": "", + "tooltip": "Color palette preset name or hex colors with weights (V2/V2_TURBO only)" + }), + } + } + + RETURN_TYPES = ("IMAGE",) + #RETURN_NAMES = ("image_output_name",) + DESCRIPTION = cleandoc(__doc__) + FUNCTION = "api_call" + + #OUTPUT_NODE = False + #OUTPUT_TOOLTIPS = ("",) # Tooltips for the output node + + CATEGORY = "Example" + + def api_call(self, prompt, model, aspect_ratio=None, resolution=None, + magic_prompt_option="AUTO", seed=0, style_type="NONE", + negative_prompt="", num_images=1, color_palette=""): + import requests + import torch + from PIL import Image + import io + import numpy as np + import time + + # Build payload with all available parameters + payload = { + "image_request": { + "prompt": prompt, + "model": model, + "num_images": num_images, + "seed": seed, + } + } + + # Make API request + headers = { + "Authorization": "Bearer TBD", # TODO(robin): add authorization key + "Content-Type": "application/json" + } + + response = requests.post( + "http://localhost:8080/proxy/ideogram/generate", + headers=headers, + json=payload + ) + + if response.status_code != 200: + raise Exception(f"API request failed: {response.text}") + + # Parse response + response_data = response.json() + + # Get the image URL from the response + image_url = response_data["data"][0]["url"] + + # Time the image download + download_start = time.time() + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") + download_time = (time.time() - download_start) * 1000 # Convert to milliseconds + print(f"Image download time: {download_time:.2f}ms") + + # Time the conversion process + conversion_start = time.time() + img = Image.open(io.BytesIO(img_response.content)) + img = img.convert("RGB") # Ensure RGB format + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + + # Convert to torch tensor and add batch dimension + img_tensor = torch.from_numpy(img_array)[None,] + conversion_time = (time.time() - conversion_start) * 1000 # Convert to milliseconds + print(f"Image conversion time: {conversion_time:.2f}ms") + + return (img_tensor,) + + """ + The node will always be re executed if any of the inputs change but + this method can be used to force the node to execute again even when the inputs don't change. + You can make this node return a number or a string. This value will be compared to the one returned the last time the node was + executed, if it is different the node will be executed again. 
+ This method is used in the core repo for the LoadImage node where they return the image hash as a string, if the image hash + changes between executions the LoadImage node is executed again. + """ + #@classmethod + #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): + # return "" + + +class RunwayVideoNode: + """ + Generates videos synchronously based on a given image, prompt, and optional parameters using Runway's API. + """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt_image": ("IMAGE",), # Will need to handle image URL conversion + "prompt_text": ("STRING", { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation" + }), + }, + "optional": { + "seed": ("INT", { + "default": 0, + "min": 0, + "max": 4294967295, + "step": 1, + "display": "number" + }), + "model": (["gen3a_turbo"], { + "default": "gen3a_turbo", + "tooltip": "Model to use for video generation" + }), + "duration": ("FLOAT", { + "default": 5.0, + "min": 1.0, + "max": 10.0, + "step": 0.1, + "display": "number", + "tooltip": "Duration of the generated video in seconds" + }), + "ratio": (["1280:768", "768:1280"], { + "default": "1280:768", + "tooltip": "Aspect ratio of the output video" + }), + "watermark": ("BOOLEAN", { + "default": False, + "tooltip": "Whether to include watermark in the output" + }), + } + } + + RETURN_TYPES = ("VIDEO",) + DESCRIPTION = "Generates videos from images using Runway's API" + FUNCTION = "generate_video" + CATEGORY = "video" + + def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", + duration=5.0, ratio="1280:768", watermark=False): + import requests + import torch + import time + import os + + # Hardcoded API key (temporary solution) + api_key = "key_e861661aa0b307e07e8cc269c1f42cf56fcce876ed6511a507e185ee51f695291da21f4777be1326b4467c34be5a6498b72dc27c9780e483250c692aa410d4c6" # Replace with actual API key + + # Convert torch tensor image to URL (you'll need to implement this part) + # This is a placeholder - you'll need to either save the image temporarily + # or upload it to a service that can host it + image_url = "http://example.com" # Placeholder + + # Build payload + payload = { + "promptImage": image_url, + "promptText": prompt_text, + "seed": seed, + "model": model, + "watermark": watermark, + "duration": duration, + "ratio": ratio + } + + # Make API request + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + "X-Runway-Version": "2024-11-06" + } + + # Time the API request + api_start = time.time() + response = requests.post( + "https://api.dev.runwayml.com/v1/image_to_video", + headers=headers, + json=payload + ) + api_time = (time.time() - api_start) * 1000 # Convert to milliseconds + print(f"API request time: {api_time:.2f}ms") + + if response.status_code != 200: + raise Exception(f"API request failed: {response.text}") + + # Parse response + response_data = response.json() + + # Note: You'll need to implement the actual video handling here + # This is a placeholder return + return (None,) + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "IdeogramTextToImage": IdeogramTextToImage, + "RunwayVideoNode": RunwayVideoNode +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "IdeogramTextToImage": "Ideogram Text to Image", + 
"RunwayVideoNode": "Runway Video Generator" +} diff --git a/nodes.py b/nodes.py index b1ab62aade5..b6761380b03 100644 --- a/nodes.py +++ b/nodes.py @@ -2258,6 +2258,7 @@ def init_builtin_extra_nodes(): "nodes_optimalsteps.py", "nodes_hidream.py", "nodes_fresca.py", + "nodes_api.py", ] import_failed = [] From 92e053d43072b197dc55b50ed9a373b378d4d794 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 15 Apr 2025 13:57:08 -0700 Subject: [PATCH 02/40] Add staging api. --- comfy_extras/nodes_api.py | 101 +++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 55 deletions(-) diff --git a/comfy_extras/nodes_api.py b/comfy_extras/nodes_api.py index 8ab57bb26c5..bb484bbc4a9 100644 --- a/comfy_extras/nodes_api.py +++ b/comfy_extras/nodes_api.py @@ -1,5 +1,10 @@ +# Add API base URL at the top of the file +API_BASE = "https://stagingapi.comfy.org" + from inspect import cleandoc -class IdeogramTextToImage: +from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO + +class IdeogramTextToImage(ComfyNodeABC): """ Generates images synchronously based on a given prompt and optional parameters. @@ -9,7 +14,7 @@ def __init__(self): pass @classmethod - def INPUT_TYPES(s): + def INPUT_TYPES(cls) -> InputTypeDict: """ Return a dictionary which contains config for all input fields. Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT". @@ -26,58 +31,61 @@ def INPUT_TYPES(s): """ return { "required": { - "prompt": ("STRING", {"multiline": True, - "default": "", "tooltip": "Prompt for the image generation"}), - "model": (["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"], {"default": "V_2"}), + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }), + "model": (IO.COMBO, { "options": ["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"], "default": "V_2", "tooltip": "Model to use for image generation"}), }, "optional": { - "aspect_ratio": (["ASPECT_1_1", "ASPECT_4_3", "ASPECT_3_4", "ASPECT_16_9", "ASPECT_9_16", - "ASPECT_2_1", "ASPECT_1_2", "ASPECT_3_2", "ASPECT_2_3", "ASPECT_4_5", "ASPECT_5_4"], { - "default": "ASPECT_1_1", - "tooltip": "The aspect ratio for image generation. Cannot be used with resolution" + "aspect_ratio": (IO.COMBO, { "options": ["ASPECT_1_1", "ASPECT_4_3", "ASPECT_3_4", "ASPECT_16_9", "ASPECT_9_16", "ASPECT_2_1", "ASPECT_1_2", "ASPECT_3_2", "ASPECT_2_3", "ASPECT_4_5", "ASPECT_5_4"], "default": "ASPECT_1_1", "tooltip": "The aspect ratio for image generation. Cannot be used with resolution" }), - "resolution": (["1024x1024", "1024x1792", "1792x1024"], { + "resolution": (IO.COMBO, { "options": ["1024x1024", "1024x1792", "1792x1024"], "default": "1024x1024", "tooltip": "The resolution for image generation (V2 only). 
Cannot be used with aspect_ratio" }), - "magic_prompt_option": (["AUTO", "ON", "OFF"], { + "magic_prompt_option": (IO.COMBO, { "options": ["AUTO", "ON", "OFF"], "default": "AUTO", "tooltip": "Determine if MagicPrompt should be used in generation" }), - "seed": ("INT", { + "seed": (IO.INT, { "default": 0, "min": 0, "max": 2147483647, "step": 1, "display": "number" }), - "style_type": (["NONE", "ANIME", "CINEMATIC", "CREATIVE", "DIGITAL_ART", "PHOTOGRAPHIC"], { + "style_type": (IO.COMBO, { "options": ["NONE", "ANIME", "CINEMATIC", "CREATIVE", "DIGITAL_ART", "PHOTOGRAPHIC"], "default": "NONE", "tooltip": "Style type for generation (V2+ only)" }), - "negative_prompt": ("STRING", { + "negative_prompt": (IO.STRING, { "multiline": True, "default": "", "tooltip": "Description of what to exclude from the image (V1/V2 only)" }), - "num_images": ("INT", { + "num_images": (IO.INT, { "default": 1, "min": 1, "max": 8, "step": 1, "display": "number" }), - "color_palette": ("STRING", { + "color_palette": (IO.STRING, { "multiline": False, "default": "", "tooltip": "Color palette preset name or hex colors with weights (V2/V2_TURBO only)" }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" } } - RETURN_TYPES = ("IMAGE",) + RETURN_TYPES = (IO.IMAGE,) #RETURN_NAMES = ("image_output_name",) - DESCRIPTION = cleandoc(__doc__) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value FUNCTION = "api_call" #OUTPUT_NODE = False @@ -85,15 +93,14 @@ def INPUT_TYPES(s): CATEGORY = "Example" - def api_call(self, prompt, model, aspect_ratio=None, resolution=None, - magic_prompt_option="AUTO", seed=0, style_type="NONE", - negative_prompt="", num_images=1, color_palette=""): + def api_call(self, prompt, model, aspect_ratio=None, resolution=None, + magic_prompt_option="AUTO", seed=0, style_type="NONE", + negative_prompt="", num_images=1, color_palette="", auth_token=None): import requests import torch from PIL import Image import io import numpy as np - import time # Build payload with all available parameters payload = { @@ -107,12 +114,12 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, # Make API request headers = { - "Authorization": "Bearer TBD", # TODO(robin): add authorization key + "Authorization": f"Bearer {auth_token}", "Content-Type": "application/json" } - + response = requests.post( - "http://localhost:8080/proxy/ideogram/generate", + f"{API_BASE}/proxy/ideogram/generate", headers=headers, json=payload ) @@ -122,30 +129,22 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, # Parse response response_data = response.json() - + # Get the image URL from the response image_url = response_data["data"][0]["url"] - - # Time the image download - download_start = time.time() + img_response = requests.get(image_url) if img_response.status_code != 200: raise Exception("Failed to download the image") - download_time = (time.time() - download_start) * 1000 # Convert to milliseconds - print(f"Image download time: {download_time:.2f}ms") - # Time the conversion process - conversion_start = time.time() img = Image.open(io.BytesIO(img_response.content)) img = img.convert("RGB") # Ensure RGB format - + # Convert to numpy array, normalize to float32 between 0 and 1 img_array = np.array(img).astype(np.float32) / 255.0 - + # Convert to torch tensor and add batch dimension img_tensor = torch.from_numpy(img_array)[None,] - conversion_time = (time.time() - conversion_start) * 1000 # Convert to milliseconds - print(f"Image conversion time: {conversion_time:.2f}ms") 
return (img_tensor,) @@ -208,7 +207,10 @@ def INPUT_TYPES(s): "default": False, "tooltip": "Whether to include watermark in the output" }), - } + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + }, } RETURN_TYPES = ("VIDEO",) @@ -216,15 +218,9 @@ def INPUT_TYPES(s): FUNCTION = "generate_video" CATEGORY = "video" - def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", - duration=5.0, ratio="1280:768", watermark=False): + def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", + duration=5.0, ratio="1280:768", watermark=False, auth_token=None): import requests - import torch - import time - import os - - # Hardcoded API key (temporary solution) - api_key = "key_e861661aa0b307e07e8cc269c1f42cf56fcce876ed6511a507e185ee51f695291da21f4777be1326b4467c34be5a6498b72dc27c9780e483250c692aa410d4c6" # Replace with actual API key # Convert torch tensor image to URL (you'll need to implement this part) # This is a placeholder - you'll need to either save the image temporarily @@ -244,27 +240,22 @@ def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", # Make API request headers = { - "Authorization": f"Bearer {api_key}", + "Authorization": f"Bearer {auth_token}", "Content-Type": "application/json", - "X-Runway-Version": "2024-11-06" } - # Time the API request - api_start = time.time() response = requests.post( - "https://api.dev.runwayml.com/v1/image_to_video", + f"{API_BASE}/proxy/runway/image_to_video", headers=headers, json=payload ) - api_time = (time.time() - api_start) * 1000 # Convert to milliseconds - print(f"API request time: {api_time:.2f}ms") if response.status_code != 200: raise Exception(f"API request failed: {response.text}") # Parse response - response_data = response.json() - + # response_data = response.json() + # Note: You'll need to implement the actual video handling here # This is a placeholder return return (None,) From 06819aa865faf1d69ae3f285ad9f37cc914aeb3d Mon Sep 17 00:00:00 2001 From: bymyself Date: Tue, 15 Apr 2025 16:38:03 -0700 Subject: [PATCH 03/40] COMFY_API_NODE_NAME node property --- comfy/comfy_types/node_typing.py | 4 +++- server.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index a348791a98f..621721213f6 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -1,7 +1,7 @@ """Comfy-specific type hinting""" from __future__ import annotations -from typing import Literal, TypedDict +from typing import Literal, TypedDict, Optional from typing_extensions import NotRequired from abc import ABC, abstractmethod from enum import Enum @@ -229,6 +229,8 @@ class ComfyNodeABC(ABC): """Flags a node as experimental, informing users that it may change or not work as expected.""" DEPRECATED: bool """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" + COMFY_API_NODE_NAME: Optional[str] + """If the node is an API node, this is the name used to identify it.""" @classmethod @abstractmethod diff --git a/server.py b/server.py index 0cc97b24812..f7ba25e6175 100644 --- a/server.py +++ b/server.py @@ -580,6 +580,9 @@ def node_info(node_class): info['deprecated'] = True if getattr(obj_class, "EXPERIMENTAL", False): info['experimental'] = True + + if hasattr(obj_class, 'COMFY_API_NODE_NAME'): + info['comfy_api_node_name'] = obj_class.COMFY_API_NODE_NAME return info @routes.get("/object_info") From 5327cc8bbb3ba2bc03a1b3bc60f16c7dd044ec9b 
Mon Sep 17 00:00:00 2001 From: bymyself Date: Tue, 15 Apr 2025 18:05:21 -0700 Subject: [PATCH 04/40] switch to boolean flag and use original node name for id --- comfy/comfy_types/node_typing.py | 4 ++-- server.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 621721213f6..dbb757c91d7 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -229,8 +229,8 @@ class ComfyNodeABC(ABC): """Flags a node as experimental, informing users that it may change or not work as expected.""" DEPRECATED: bool """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" - COMFY_API_NODE_NAME: Optional[str] - """If the node is an API node, this is the name used to identify it.""" + API_NODE: bool + """Flags a node as an API node.""" @classmethod @abstractmethod diff --git a/server.py b/server.py index f7ba25e6175..f64ec27d4a2 100644 --- a/server.py +++ b/server.py @@ -581,8 +581,8 @@ def node_info(node_class): if getattr(obj_class, "EXPERIMENTAL", False): info['experimental'] = True - if hasattr(obj_class, 'COMFY_API_NODE_NAME'): - info['comfy_api_node_name'] = obj_class.COMFY_API_NODE_NAME + if hasattr(obj_class, 'API_NODE'): + info['api_node'] = obj_class.API_NODE return info @routes.get("/object_info") From bdbde1a3c21f953fe50a582fd3e6a09e582dda81 Mon Sep 17 00:00:00 2001 From: bymyself Date: Tue, 15 Apr 2025 18:06:36 -0700 Subject: [PATCH 05/40] add optional to type --- comfy/comfy_types/node_typing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index dbb757c91d7..0bdda032e8a 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -229,7 +229,7 @@ class ComfyNodeABC(ABC): """Flags a node as experimental, informing users that it may change or not work as expected.""" DEPRECATED: bool """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" - API_NODE: bool + API_NODE: Optional[bool] """Flags a node as an API node.""" @classmethod From 1b42a5ad66db1c1f0005609a57d1bc0f417cb999 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 16 Apr 2025 13:38:28 -0700 Subject: [PATCH 06/40] Add API_NODE and common error for missing auth token (#5) --- comfy_extras/nodes_api.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/comfy_extras/nodes_api.py b/comfy_extras/nodes_api.py index bb484bbc4a9..839e717ea63 100644 --- a/comfy_extras/nodes_api.py +++ b/comfy_extras/nodes_api.py @@ -4,6 +4,12 @@ from inspect import cleandoc from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO +def check_auth_token(auth_token): + """Verify that an auth token is present.""" + if auth_token is None: + raise Exception("Please login first to use this node.") + return auth_token + class IdeogramTextToImage(ComfyNodeABC): """ Generates images synchronously based on a given prompt and optional parameters. 
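As a quick illustration of how the new flag surfaces (a minimal sketch, not part of this patch series; it assumes a local ComfyUI server on the default 127.0.0.1:8188 and the `requests` package, neither of which is introduced by these commits), the `api_node` field that the server.py hunks above add to `/object_info` can be queried like this:

import requests

# /object_info returns a mapping of node name -> node info; nodes that set
# API_NODE = True are expected to carry an "api_node" entry in that info dict.
object_info = requests.get("http://127.0.0.1:8188/object_info", timeout=10).json()
api_nodes = sorted(name for name, info in object_info.items() if info.get("api_node"))
print("API nodes:", api_nodes)

Frontends can use the same check to badge or gate nodes that call external paid APIs.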
@@ -84,13 +90,9 @@ def INPUT_TYPES(cls) -> InputTypeDict: } RETURN_TYPES = (IO.IMAGE,) - #RETURN_NAMES = ("image_output_name",) DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value FUNCTION = "api_call" - - #OUTPUT_NODE = False - #OUTPUT_TOOLTIPS = ("",) # Tooltips for the output node - + API_NODE = True CATEGORY = "Example" def api_call(self, prompt, model, aspect_ratio=None, resolution=None, @@ -102,6 +104,8 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, import io import numpy as np + check_auth_token(auth_token) + # Build payload with all available parameters payload = { "image_request": { @@ -217,11 +221,12 @@ def INPUT_TYPES(s): DESCRIPTION = "Generates videos from images using Runway's API" FUNCTION = "generate_video" CATEGORY = "video" + API_NODE = True def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", duration=5.0, ratio="1280:768", watermark=False, auth_token=None): import requests - + check_auth_token(auth_token) # Convert torch tensor image to URL (you'll need to implement this part) # This is a placeholder - you'll need to either save the image temporarily # or upload it to a service that can host it From 9592c7af4539c4a2d1228979375f234fd0bfb1a9 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Sat, 19 Apr 2025 13:23:26 -0700 Subject: [PATCH 07/40] Add Minimax Video Generation + Async Task queue polling example (#6) --- .github/workflows/update-api-stubs.yml | 49 ++ comfy_api_nodes/__init__.py | 0 comfy_api_nodes/apis/__init__.py | 0 comfy_api_nodes/apis/client.py | 457 ++++++++++++++++ comfy_api_nodes/apis/stubs.py | 513 ++++++++++++++++++ .../nodes_api.py | 200 ++++--- nodes.py | 8 + uv.lock | 7 + 8 files changed, 1131 insertions(+), 103 deletions(-) create mode 100644 .github/workflows/update-api-stubs.yml create mode 100644 comfy_api_nodes/__init__.py create mode 100644 comfy_api_nodes/apis/__init__.py create mode 100644 comfy_api_nodes/apis/client.py create mode 100644 comfy_api_nodes/apis/stubs.py rename {comfy_extras => comfy_api_nodes}/nodes_api.py (62%) create mode 100644 uv.lock diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml new file mode 100644 index 00000000000..45fbeda9fcf --- /dev/null +++ b/.github/workflows/update-api-stubs.yml @@ -0,0 +1,49 @@ +name: Generate API Models + +on: + schedule: + # Run weekly on Monday at 00:00 UTC + - cron: '0 0 * * 1' + workflow_dispatch: + # Allow manual triggering + +jobs: + generate-models: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install 'datamodel-code-generator[http]' + + - name: Generate API models + run: | + datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis/stubs.py + + - name: Check for changes + id: git-check + run: | + git diff --exit-code comfy_extras/apis/stubs.py || echo "changes=true" >> $GITHUB_OUTPUT + + - name: Create Pull Request + if: steps.git-check.outputs.changes == 'true' + uses: peter-evans/create-pull-request@v5 + with: + commit-message: 'chore: update API models from OpenAPI spec' + title: 'Update API models from OpenAPI spec' + body: | + This PR updates the API models based on the latest OpenAPI specification. + + Generated automatically by the Generate API Models workflow. 
+ branch: update-api-models + delete-branch: true + base: main diff --git a/comfy_api_nodes/__init__.py b/comfy_api_nodes/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py new file mode 100644 index 00000000000..2a98cc3015e --- /dev/null +++ b/comfy_api_nodes/apis/client.py @@ -0,0 +1,457 @@ +import logging + +""" +API Client Framework for ComfyUI + +This module provides a flexible framework for making API requests from ComfyUI nodes. +It supports both synchronous and asynchronous API operations with proper type validation. + +Key Components: +-------------- +1. ApiClient - Handles HTTP requests with authentication and error handling +2. ApiEndpoint - Defines a single HTTP endpoint with its request/response models +3. ApiOperation - Executes a single synchronous API operation +4. PollingOperation - Executes an asynchronous operation with polling for completion + +Usage Examples: +-------------- + +# Example 1: Synchronous API Operation +# ------------------------------------ +# For a simple API call that returns the result immediately: + +# 1. Create the API client +api_client = ApiClient( + base_url="https://api.example.com", + api_key="your_api_key_here", + timeout=30.0, + verify_ssl=True +) + +# 2. Define the endpoint +user_info_endpoint = ApiEndpoint( + path="/v1/users/me", + method=HttpMethod.GET, + request_model=EmptyRequest, # No request body needed + response_model=UserProfile, # Pydantic model for the response + query_params=None +) + +# 3. Create the request object +request = EmptyRequest() + +# 4. Create and execute the operation +operation = ApiOperation( + endpoint=user_info_endpoint, + request=request +) +user_profile = operation.execute(client=api_client) # Returns immediately with the result + + +# Example 2: Asynchronous API Operation with Polling +# ------------------------------------------------- +# For an API that starts a task and requires polling for completion: + +# 1. Define the endpoints (initial request and polling) +generate_image_endpoint = ApiEndpoint( + path="/v1/images/generate", + method=HttpMethod.POST, + request_model=ImageGenerationRequest, + response_model=TaskCreatedResponse, + query_params=None +) + +check_task_endpoint = ApiEndpoint( + path="/v1/tasks/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=ImageGenerationResult, + query_params=None +) + +# 2. Create the request object +request = ImageGenerationRequest( + prompt="a beautiful sunset over mountains", + width=1024, + height=1024, + num_images=1 +) + +# 3. 
Create and execute the polling operation +operation = PollingOperation( + initial_endpoint=generate_image_endpoint, + initial_request=request, + poll_endpoint=check_task_endpoint, + task_id_field="task_id", + status_field="status", + completed_statuses=["completed"], + failed_statuses=["failed", "error"] +) + +# This will make the initial request and then poll until completion +result = operation.execute(client=api_client) # Returns the final ImageGenerationResult when done +""" + +from typing import ( + Dict, + Type, + Optional, + Any, + TypeVar, + Generic, + Callable, +) +from pydantic import BaseModel +from enum import Enum +import time +import json +import requests +from urllib.parse import urljoin + +# Import models from your generated stubs + +T = TypeVar("T", bound=BaseModel) +R = TypeVar("R", bound=BaseModel) +P = TypeVar("P", bound=BaseModel) # For poll response + + +class EmptyRequest(BaseModel): + """Base class for empty request bodies. + For GET requests, fields will be sent as query parameters.""" + + pass + + +class HttpMethod(str, Enum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + PATCH = "PATCH" + + +class ApiClient: + """ + Client for making HTTP requests to an API with authentication and error handling. + """ + + def __init__( + self, + base_url: str, + api_key: Optional[str] = None, + timeout: float = 30.0, + verify_ssl: bool = True, + ): + self.base_url = base_url + self.api_key = api_key + self.timeout = timeout + self.verify_ssl = verify_ssl + + def get_headers(self) -> Dict[str, str]: + """Get headers for API requests, including authentication if available""" + headers = {"Content-Type": "application/json", "Accept": "application/json"} + + if self.api_key: + headers["Authorization"] = f"Bearer {self.api_key}" + + return headers + + def request( + self, + method: str, + path: str, + params: Optional[Dict[str, Any]] = None, + json: Optional[Dict[str, Any]] = None, + files: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + ) -> Dict[str, Any]: + """ + Make an HTTP request to the API + + Args: + method: HTTP method (GET, POST, etc.) + path: API endpoint path (will be joined with base_url) + params: Query parameters + json: JSON body data + files: Files to upload + headers: Additional headers + + Returns: + Parsed JSON response + + Raises: + requests.RequestException: If the request fails + """ + url = urljoin(self.base_url, path) + self.check_auth_token(self.api_key) + # Combine default headers with any provided headers + request_headers = self.get_headers() + if headers: + request_headers.update(headers) + try: + response = requests.request( + method=method, + url=url, + params=params, + json=json, + files=files, + headers=request_headers, + timeout=self.timeout, + verify=self.verify_ssl, + ) + + # Raise exception for error status codes + response.raise_for_status() + except requests.ConnectionError: + raise Exception( + f"Unable to connect to the API server at {self.base_url}. Please check your internet connection or verify the service is available." + ) + + except requests.Timeout: + raise Exception( + f"Request timed out after {self.timeout} seconds. The server might be experiencing high load or the operation is taking longer than expected." 
+ ) + + except requests.HTTPError as e: + status_code = e.response.status_code if hasattr(e, "response") else None + error_message = f"HTTP Error: {str(e)}" + logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})") + if status_code == 401: + error_message = "Unauthorized: Please login first to use this node." + if status_code == 402: + error_message = "Payment Required: Please add credits to your account to use this node." + raise Exception(error_message) + + # Parse and return JSON response + if response.content: + return response.json() + return {} + + def check_auth_token(self, auth_token): + """Verify that an auth token is present.""" + if auth_token is None: + raise Exception("Please login first to use this node.") + return auth_token + + +class ApiEndpoint(Generic[T, R]): + """Defines an API endpoint with its request and response types""" + + def __init__( + self, + path: str, + method: HttpMethod, + request_model: Type[T], + response_model: Type[R], + query_params: Optional[Dict[str, Any]] = None, + ): + """Initialize an API endpoint definition. + + Args: + path: The URL path for this endpoint, can include placeholders like {id} + method: The HTTP method to use (GET, POST, etc.) + request_model: Pydantic model class that defines the structure and validation rules for API requests to this endpoint + response_model: Pydantic model class that defines the structure and validation rules for API responses from this endpoint + query_params: Optional dictionary of query parameters to include in the request + """ + self.path = path + self.method = method + self.request_model = request_model + self.response_model = response_model + self.query_params = query_params or {} + + +class SynchronousOperation(Generic[T, R]): + """ + Represents a single synchronous API operation. 
+ """ + + def __init__( + self, + endpoint: ApiEndpoint[T, R], + request: T, + api_base: str = "https://stagingapi.comfy.org", + auth_token: Optional[str] = None, + timeout: float = 30.0, + verify_ssl: bool = True, + ): + self.endpoint = endpoint + self.request = request + self.response = None + self.error = None + self.api_base = api_base + self.auth_token = auth_token + self.timeout = timeout + self.verify_ssl = verify_ssl + + def execute(self, client: Optional[ApiClient] = None) -> R: + """Execute the API operation using the provided client or create one""" + try: + # Create client if not provided + if client is None: + if self.api_base is None: + raise ValueError("Either client or api_base must be provided") + client = ApiClient( + base_url=self.api_base, + api_key=self.auth_token, + timeout=self.timeout, + verify_ssl=self.verify_ssl, + ) + + # Convert request model to dict + request_dict = self.request.model_dump(exclude_none=True) + + # Debug log for request + logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}") + logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}") + logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}") + + # Make the request + resp = client.request( + method=self.endpoint.method.value, + path=self.endpoint.path, + json=request_dict, + params=self.endpoint.query_params, + ) + + # Debug log for response + logging.debug(f"[DEBUG] API Response: {json.dumps(resp, indent=2)}") + + # Parse and return the response + return self._parse_response(resp) + + except Exception as e: + logging.debug(f"[DEBUG] API Exception: {str(e)}") + raise Exception(str(e)) + + def _parse_response(self, resp): + """Parse response data - can be overridden by subclasses""" + # The response is already the complete object, don't extract just the "data" field + # as that would lose the outer structure (created timestamp, etc.) + + # Parse response using the provided model + self.response = self.endpoint.response_model.model_validate(resp) + logging.debug(f"[DEBUG] Parsed Response: {self.response}") + return self.response + + +class TaskStatus(str, Enum): + """Enum for task status values""" + + COMPLETED = "completed" + FAILED = "failed" + PENDING = "pending" + + +class PollingOperation(Generic[T, R]): + """ + Represents an asynchronous API operation that requires polling for completion. + """ + + def __init__( + self, + poll_endpoint: ApiEndpoint[EmptyRequest, R], + completed_statuses: list, + failed_statuses: list, + status_extractor: Callable[[R], str], + request: Optional[T] = None, + api_base: str = "https://stagingapi.comfy.org", + auth_token: Optional[str] = None, + poll_interval: float = 1.0, + ): + self.poll_endpoint = poll_endpoint + self.request = request + self.api_base = api_base + self.auth_token = auth_token + self.poll_interval = poll_interval + + # Polling configuration + self.status_extractor = status_extractor or ( + lambda x: getattr(x, "status", None) + ) + self.completed_statuses = completed_statuses + self.failed_statuses = failed_statuses + + # For storing response data + self.final_response = None + self.error = None + + def execute(self, client: Optional[ApiClient] = None) -> R: + """Execute the polling operation using the provided client. 
If failed, raise an exception.""" + try: + if client is None: + client = ApiClient( + base_url=self.api_base, + api_key=self.auth_token, + ) + return self._poll_until_complete(client) + except Exception as e: + raise Exception(f"Error during polling: {str(e)}") + + def _check_task_status(self, response: R) -> TaskStatus: + """Check task status using the status extractor function""" + try: + status = self.status_extractor(response) + if status in self.completed_statuses: + return TaskStatus.COMPLETED + elif status in self.failed_statuses: + return TaskStatus.FAILED + return TaskStatus.PENDING + except Exception as e: + logging.debug(f"Error extracting status: {e}") + return TaskStatus.PENDING + + def _poll_until_complete(self, client: ApiClient) -> R: + """Poll until the task is complete""" + poll_count = 0 + while True: + try: + poll_count += 1 + logging.debug(f"[DEBUG] Polling attempt #{poll_count}") + + request_dict = ( + self.request.model_dump(exclude_none=True) + if self.request is not None + else None + ) + + if poll_count == 1: + logging.debug( + f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}" + ) + logging.debug( + f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}" + ) + + # Query task status + resp = client.request( + method=self.poll_endpoint.method.value, + path=self.poll_endpoint.path, + params=self.poll_endpoint.query_params, + json=request_dict, + ) + + # Parse response + response_obj = self.poll_endpoint.response_model.model_validate(resp) + + # Check if task is complete + status = self._check_task_status(response_obj) + logging.debug(f"[DEBUG] Task Status: {status}") + + if status == TaskStatus.COMPLETED: + logging.debug("[DEBUG] Task completed successfully") + self.final_response = response_obj + return self.final_response + elif status == TaskStatus.FAILED: + logging.debug(f"[DEBUG] Task failed: {json.dumps(resp)}") + raise Exception(f"Task failed: {json.dumps(resp)}") + else: + logging.debug("[DEBUG] Task still pending, continuing to poll...") + + # Wait before polling again + logging.debug(f"[DEBUG] Waiting {self.poll_interval} seconds before next poll") + time.sleep(self.poll_interval) + + except Exception as e: + logging.debug(f"[DEBUG] Polling error: {str(e)}") + raise Exception(f"Error while polling: {str(e)}") diff --git a/comfy_api_nodes/apis/stubs.py b/comfy_api_nodes/apis/stubs.py new file mode 100644 index 00000000000..d1da6a5ab36 --- /dev/null +++ b/comfy_api_nodes/apis/stubs.py @@ -0,0 +1,513 @@ +# generated by datamodel-codegen: +# filename: http://localhost:8080/openapi +# timestamp: 2025-04-18T21:35:21+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional +from uuid import UUID + +from pydantic import BaseModel, Field, conint, constr + + +class ComfyNode(BaseModel): + category: Optional[str] = Field( + None, + description='UI category where the node is listed, used for grouping nodes.', + ) + comfy_node_name: Optional[str] = Field( + None, description='Unique identifier for the node' + ) + deprecated: Optional[bool] = Field( + None, + description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', + ) + description: Optional[str] = Field( + None, description="Brief description of the node's functionality or purpose." 
+ ) + experimental: Optional[bool] = Field( + None, + description='Indicates if the node is experimental, subject to changes or removal.', + ) + function: Optional[str] = Field( + None, description='Name of the entry-point function to execute the node.' + ) + input_types: Optional[str] = Field(None, description='Defines input parameters') + output_is_list: Optional[List[bool]] = Field( + None, description='Boolean values indicating if each output is a list.' + ) + return_names: Optional[str] = Field( + None, description='Names of the outputs for clarity in workflows.' + ) + return_types: Optional[str] = Field( + None, description='Specifies the types of outputs produced by the node.' + ) + + +class ComfyNodeCloudBuildInfo(BaseModel): + build_id: Optional[str] = None + location: Optional[str] = None + project_id: Optional[str] = None + project_number: Optional[str] = None + + +class Customer(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the user was created' + ) + email: Optional[str] = Field(None, description='The email address for this user') + id: str = Field(..., description='The firebase UID of the user') + name: Optional[str] = Field(None, description='The name for this user') + updatedAt: Optional[datetime] = Field( + None, description='The date and time the user was last updated' + ) + + +class Error(BaseModel): + details: Optional[List[str]] = Field( + None, + description='Optional detailed information about the error or hints for resolving it.', + ) + message: Optional[str] = Field( + None, description='A clear and concise description of the error.' + ) + + +class ErrorResponse(BaseModel): + error: str + message: str + + +class GitCommitSummary(BaseModel): + author: Optional[str] = Field(None, description='The author of the commit') + branch_name: Optional[str] = Field( + None, description='The branch where the commit was made' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_name: Optional[str] = Field(None, description='The name of the commit') + status_summary: Optional[Dict[str, str]] = Field( + None, description='A map of operating system to status pairs' + ) + timestamp: Optional[datetime] = Field( + None, description='The timestamp when the commit was made' + ) + + +class ImageRequest(BaseModel): + aspect_ratio: Optional[str] = Field( + None, + description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.", + ) + color_palette: Optional[Dict[str, Any]] = Field( + None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' + ) + magic_prompt_option: Optional[str] = Field( + None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." + ) + model: Optional[str] = Field( + None, + description="Optional. The model used (e.g., 'V_2', 'V_2A_TURBO'). Defaults to 'V_2' if unspecified.", + ) + negative_prompt: Optional[str] = Field( + None, + description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', + ) + num_images: Optional[conint(ge=1, le=8)] = Field( + 1, description='Optional. Number of images to generate (1-8). Defaults to 1.' + ) + prompt: str = Field( + ..., description='Required. The prompt to use to generate the image.' + ) + resolution: Optional[str] = Field( + None, + description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. 
Cannot be used with aspect_ratio.", + ) + seed: Optional[conint(ge=0, le=2147483647)] = Field( + None, description='Optional. A number between 0 and 2147483647.' + ) + style_type: Optional[str] = Field( + None, + description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", + ) + + +class IdeogramGenerateRequest(BaseModel): + image_request: ImageRequest = Field( + ..., description='The image generation request parameters.' + ) + + +class Datum(BaseModel): + is_image_safe: Optional[bool] = Field( + None, description='Indicates whether the image is considered safe.' + ) + prompt: Optional[str] = Field( + None, description='The prompt used to generate this image.' + ) + resolution: Optional[str] = Field( + None, description="The resolution of the generated image (e.g., '1024x1024')." + ) + seed: Optional[int] = Field( + None, description='The seed value used for this generation.' + ) + style_type: Optional[str] = Field( + None, + description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", + ) + url: Optional[str] = Field(None, description='URL to the generated image.') + + +class IdeogramGenerateResponse(BaseModel): + created: Optional[datetime] = Field( + None, description='Timestamp when the generation was created.' + ) + data: Optional[List[Datum]] = Field( + None, description='Array of generated image information.' + ) + + +class MachineStats(BaseModel): + cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') + disk_capacity: Optional[str] = Field( + None, description='Total disk capacity on the machine.' + ) + gpu_type: Optional[str] = Field( + None, description='The GPU type. eg. NVIDIA Tesla K80' + ) + initial_cpu: Optional[str] = Field( + None, description='Initial CPU available before the job starts.' + ) + initial_disk: Optional[str] = Field( + None, description='Initial disk available before the job starts.' + ) + initial_ram: Optional[str] = Field( + None, description='Initial RAM available before the job starts.' + ) + machine_name: Optional[str] = Field(None, description='Name of the machine.') + memory_capacity: Optional[str] = Field( + None, description='Total memory on the machine.' + ) + os_version: Optional[str] = Field( + None, description='The operating system version. eg. Ubuntu Linux 20.04' + ) + pip_freeze: Optional[str] = Field(None, description='The pip freeze output') + vram_time_series: Optional[Dict[str, Any]] = Field( + None, description='Time series of VRAM usage.' + ) + + +class MinimaxBaseResponse(BaseModel): + status_code: int = Field( + ..., + description='Status code. 0 indicates success, other values indicate errors.', + ) + status_msg: str = Field( + ..., description='Specific error details or success message.' 
+ ) + + +class File(BaseModel): + bytes: Optional[int] = Field(None, description='File size in bytes') + created_at: Optional[int] = Field( + None, description='Unix timestamp when the file was created, in seconds' + ) + download_url: Optional[str] = Field( + None, description='The URL to download the video' + ) + file_id: Optional[int] = Field(None, description='Unique identifier for the file') + filename: Optional[str] = Field(None, description='The name of the file') + purpose: Optional[str] = Field(None, description='The purpose of using the file') + + +class MinimaxFileRetrieveResponse(BaseModel): + base_resp: MinimaxBaseResponse + file: File + + +class Status(str, Enum): + Queueing = 'Queueing' + Preparing = 'Preparing' + Processing = 'Processing' + Success = 'Success' + Fail = 'Fail' + + +class MinimaxTaskResultResponse(BaseModel): + base_resp: MinimaxBaseResponse + file_id: Optional[str] = Field( + None, + description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', + ) + status: Status = Field( + ..., + description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", + ) + task_id: str = Field(..., description='The task ID being queried.') + + +class Model(str, Enum): + T2V_01_Director = 'T2V-01-Director' + I2V_01_Director = 'I2V-01-Director' + S2V_01 = 'S2V-01' + I2V_01 = 'I2V-01' + I2V_01_live = 'I2V-01-live' + T2V_01 = 'T2V-01' + + +class SubjectReferenceItem(BaseModel): + image: Optional[str] = Field( + None, description='URL or base64 encoding of the subject reference image.' + ) + mask: Optional[str] = Field( + None, + description='URL or base64 encoding of the mask for the subject reference image.', + ) + + +class MinimaxVideoGenerationRequest(BaseModel): + callback_url: Optional[str] = Field( + None, + description='Optional. URL to receive real-time status updates about the video generation task.', + ) + first_frame_image: Optional[str] = Field( + None, + description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', + ) + model: Model = Field( + ..., + description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', + ) + prompt: Optional[constr(max_length=2000)] = Field( + None, + description='Description of the video. Should be less than 2000 characters. Supports camera movement instructions in [brackets].', + ) + prompt_optimizer: Optional[bool] = Field( + True, + description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', + ) + subject_reference: Optional[List[SubjectReferenceItem]] = Field( + None, + description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', + ) + + +class MinimaxVideoGenerationResponse(BaseModel): + base_resp: MinimaxBaseResponse + task_id: str = Field( + ..., description='The task ID for the asynchronous video generation task.' 
+ ) + + +class NodeStatus(str, Enum): + NodeStatusActive = 'NodeStatusActive' + NodeStatusDeleted = 'NodeStatusDeleted' + NodeStatusBanned = 'NodeStatusBanned' + + +class NodeVersionStatus(str, Enum): + NodeVersionStatusActive = 'NodeVersionStatusActive' + NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' + NodeVersionStatusBanned = 'NodeVersionStatusBanned' + NodeVersionStatusPending = 'NodeVersionStatusPending' + NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' + + +class NodeVersionUpdateRequest(BaseModel): + changelog: Optional[str] = Field( + None, description='The changelog describing the version changes.' + ) + deprecated: Optional[bool] = Field( + None, description='Whether the version is deprecated.' + ) + + +class PersonalAccessToken(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='[Output Only]The date and time the token was created.' + ) + description: Optional[str] = Field( + None, + description="Optional. A more detailed description of the token's intended use.", + ) + id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') + name: Optional[str] = Field( + None, + description='Required. The name of the token. Can be a simple description.', + ) + token: Optional[str] = Field( + None, + description='[Output Only]. The personal access token. Only returned during creation.', + ) + + +class PublisherStatus(str, Enum): + PublisherStatusActive = 'PublisherStatusActive' + PublisherStatusBanned = 'PublisherStatusBanned' + + +class PublisherUser(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + name: Optional[str] = Field(None, description='The name for this user.') + + +class StorageFile(BaseModel): + file_path: Optional[str] = Field(None, description='Path to the file in storage') + id: Optional[UUID] = Field( + None, description='Unique identifier for the storage file' + ) + public_url: Optional[str] = Field(None, description='Public URL') + + +class User(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + isAdmin: Optional[bool] = Field( + None, description='Indicates if the user has admin privileges.' + ) + isApproved: Optional[bool] = Field( + None, description='Indicates if the user is approved.' + ) + name: Optional[str] = Field(None, description='The name for this user.') + + +class WorkflowRunStatus(str, Enum): + WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' + WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' + WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' + + +class ActionJobResult(BaseModel): + action_job_id: Optional[str] = Field( + None, description='Identifier of the job this result belongs to' + ) + action_run_id: Optional[str] = Field( + None, description='Identifier of the run this result belongs to' + ) + author: Optional[str] = Field(None, description='The author of the commit') + avg_vram: Optional[int] = Field( + None, description='The average VRAM used by the job' + ) + branch_name: Optional[str] = Field( + None, description='Name of the relevant git branch' + ) + comfy_run_flags: Optional[str] = Field( + None, description='The comfy run flags. E.g. 
`--low-vram`' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_id: Optional[str] = Field(None, description='The ID of the commit') + commit_message: Optional[str] = Field(None, description='The message of the commit') + commit_time: Optional[int] = Field( + None, description='The Unix timestamp when the commit was made' + ) + cuda_version: Optional[str] = Field(None, description='CUDA version used') + end_time: Optional[int] = Field( + None, description='The end time of the job as a Unix timestamp.' + ) + git_repo: Optional[str] = Field(None, description='The repository name') + id: Optional[UUID] = Field(None, description='Unique identifier for the job result') + job_trigger_user: Optional[str] = Field( + None, description='The user who triggered the job.' + ) + machine_stats: Optional[MachineStats] = None + operating_system: Optional[str] = Field(None, description='Operating system used') + peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') + pr_number: Optional[str] = Field(None, description='The pull request number') + python_version: Optional[str] = Field(None, description='PyTorch version used') + pytorch_version: Optional[str] = Field(None, description='PyTorch version used') + start_time: Optional[int] = Field( + None, description='The start time of the job as a Unix timestamp.' + ) + status: Optional[WorkflowRunStatus] = None + storage_file: Optional[StorageFile] = None + workflow_name: Optional[str] = Field(None, description='Name of the workflow') + + +class NodeVersion(BaseModel): + changelog: Optional[str] = Field( + None, description='Summary of changes made in this version' + ) + comfy_node_extract_status: Optional[str] = Field( + None, description='The status of comfy node extraction process.' + ) + createdAt: Optional[datetime] = Field( + None, description='The date and time the version was created.' + ) + dependencies: Optional[List[str]] = Field( + None, description='A list of pip dependencies required by the node.' + ) + deprecated: Optional[bool] = Field( + None, description='Indicates if this version is deprecated.' + ) + downloadUrl: Optional[str] = Field( + None, description='[Output Only] URL to download this version of the node' + ) + id: Optional[str] = None + node_id: Optional[str] = Field( + None, description='The unique identifier of the node.' + ) + status: Optional[NodeVersionStatus] = None + status_reason: Optional[str] = Field( + None, description='The reason for the status change.' + ) + version: Optional[str] = Field( + None, + description='The version identifier, following semantic versioning. Must be unique for the node.', + ) + + +class PublisherMember(BaseModel): + id: Optional[str] = Field( + None, description='The unique identifier for the publisher member.' + ) + role: Optional[str] = Field( + None, description='The role of the user in the publisher.' + ) + user: Optional[PublisherUser] = None + + +class Publisher(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the publisher was created.' + ) + description: Optional[str] = None + id: Optional[str] = Field( + None, + description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", + ) + logo: Optional[str] = Field(None, description="URL to the publisher's logo.") + members: Optional[List[PublisherMember]] = Field( + None, description='A list of members in the publisher.' 
+ ) + name: Optional[str] = None + source_code_repo: Optional[str] = None + status: Optional[PublisherStatus] = None + support: Optional[str] = None + website: Optional[str] = None + + +class Node(BaseModel): + author: Optional[str] = None + category: Optional[str] = Field(None, description='The category of the node.') + description: Optional[str] = None + downloads: Optional[int] = Field( + None, description='The number of downloads of the node.' + ) + icon: Optional[str] = Field(None, description="URL to the node's icon.") + id: Optional[str] = Field(None, description='The unique identifier of the node.') + latest_version: Optional[NodeVersion] = None + license: Optional[str] = Field( + None, description="The path to the LICENSE file in the node's repository." + ) + name: Optional[str] = Field(None, description='The display name of the node.') + publisher: Optional[Publisher] = None + rating: Optional[float] = Field(None, description='The average rating of the node.') + repository: Optional[str] = Field(None, description="URL to the node's repository.") + status: Optional[NodeStatus] = None + status_detail: Optional[str] = Field( + None, description='The status detail of the node.' + ) + tags: Optional[List[str]] = None + translations: Optional[Dict[str, Dict[str, Any]]] = None diff --git a/comfy_extras/nodes_api.py b/comfy_api_nodes/nodes_api.py similarity index 62% rename from comfy_extras/nodes_api.py rename to comfy_api_nodes/nodes_api.py index 839e717ea63..217de50c9cd 100644 --- a/comfy_extras/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,8 +1,8 @@ -# Add API base URL at the top of the file -API_BASE = "https://stagingapi.comfy.org" - from inspect import cleandoc from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO +from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod, PollingOperation, EmptyRequest +from comfy_api_nodes.apis.stubs import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest, MinimaxVideoGenerationRequest, MinimaxVideoGenerationResponse, MinimaxFileRetrieveResponse, MinimaxTaskResultResponse, Model +import logging def check_auth_token(auth_token): """Verify that an auth token is present.""" @@ -98,45 +98,44 @@ def INPUT_TYPES(cls) -> InputTypeDict: def api_call(self, prompt, model, aspect_ratio=None, resolution=None, magic_prompt_option="AUTO", seed=0, style_type="NONE", negative_prompt="", num_images=1, color_palette="", auth_token=None): - import requests import torch from PIL import Image import io import numpy as np + import requests - check_auth_token(auth_token) - - # Build payload with all available parameters - payload = { - "image_request": { - "prompt": prompt, - "model": model, - "num_images": num_images, - "seed": seed, - } - } - - # Make API request - headers = { - "Authorization": f"Bearer {auth_token}", - "Content-Type": "application/json" - } - - response = requests.post( - f"{API_BASE}/proxy/ideogram/generate", - headers=headers, - json=payload + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/ideogram/generate", + method=HttpMethod.POST, + request_model=IdeogramGenerateRequest, + response_model=IdeogramGenerateResponse + ), + request=IdeogramGenerateRequest( + image_request=ImageRequest( + prompt=prompt, + model=model, + num_images=num_images, + seed=seed, + aspect_ratio=aspect_ratio if aspect_ratio != "ASPECT_1_1" else None, + resolution=resolution if resolution != "1024x1024" else None, + magic_prompt_option=magic_prompt_option if magic_prompt_option 
!= "AUTO" else None, + style_type=style_type if style_type != "NONE" else None, + negative_prompt=negative_prompt if negative_prompt else None, + color_palette=None + ) + ), + auth_token=auth_token ) - if response.status_code != 200: - raise Exception(f"API request failed: {response.text}") - - # Parse response - response_data = response.json() + response = operation.execute() - # Get the image URL from the response - image_url = response_data["data"][0]["url"] + if not response.data or len(response.data) == 0: + raise Exception("No images were generated in the response") + image_url = response.data[0].url + if not image_url: + raise Exception("No image URL was generated in the response") img_response = requests.get(image_url) if img_response.status_code != 200: raise Exception("Failed to download the image") @@ -165,9 +164,9 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, # return "" -class RunwayVideoNode: +class MinimaxVideoNode: """ - Generates videos synchronously based on a given image, prompt, and optional parameters using Runway's API. + Generates videos synchronously based on a prompt, and optional parameters using Minimax's API. """ def __init__(self): pass @@ -176,41 +175,15 @@ def __init__(self): def INPUT_TYPES(s): return { "required": { - "prompt_image": ("IMAGE",), # Will need to handle image URL conversion "prompt_text": ("STRING", { "multiline": True, "default": "", "tooltip": "Text prompt to guide the video generation" }), - }, - "optional": { - "seed": ("INT", { - "default": 0, - "min": 0, - "max": 4294967295, - "step": 1, - "display": "number" - }), - "model": (["gen3a_turbo"], { - "default": "gen3a_turbo", + "model": (["T2V-01", "I2V-01-Director", "S2V-01", "I2V-01", "I2V-01-live", "T2V-01"], { + "default": "T2V-01", "tooltip": "Model to use for video generation" }), - "duration": ("FLOAT", { - "default": 5.0, - "min": 1.0, - "max": 10.0, - "step": 0.1, - "display": "number", - "tooltip": "Duration of the generated video in seconds" - }), - "ratio": (["1280:768", "768:1280"], { - "default": "1280:768", - "tooltip": "Aspect ratio of the output video" - }), - "watermark": ("BOOLEAN", { - "default": False, - "tooltip": "Whether to include watermark in the output" - }), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG" @@ -218,62 +191,83 @@ def INPUT_TYPES(s): } RETURN_TYPES = ("VIDEO",) - DESCRIPTION = "Generates videos from images using Runway's API" + DESCRIPTION = "Generates videos from prompts using Minimax's API" FUNCTION = "generate_video" CATEGORY = "video" API_NODE = True - - def generate_video(self, prompt_image, prompt_text, seed=0, model="gen3a_turbo", - duration=5.0, ratio="1280:768", watermark=False, auth_token=None): - import requests - check_auth_token(auth_token) - # Convert torch tensor image to URL (you'll need to implement this part) - # This is a placeholder - you'll need to either save the image temporarily - # or upload it to a service that can host it - image_url = "http://example.com" # Placeholder - - # Build payload - payload = { - "promptImage": image_url, - "promptText": prompt_text, - "seed": seed, - "model": model, - "watermark": watermark, - "duration": duration, - "ratio": ratio - } - - # Make API request - headers = { - "Authorization": f"Bearer {auth_token}", - "Content-Type": "application/json", - } - - response = requests.post( - f"{API_BASE}/proxy/runway/image_to_video", - headers=headers, - json=payload + OUTPUT_NODE = True + + def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): + 
video_generate_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/video_generation", + method=HttpMethod.POST, + request_model=MinimaxVideoGenerationRequest, + response_model=MinimaxVideoGenerationResponse, + ), + request=MinimaxVideoGenerationRequest( + model=Model(model), + prompt=prompt_text, + callback_url=None, + first_frame_image=None, + subject_reference=None, + prompt_optimizer=None + ), + auth_token=auth_token + ) + response = video_generate_operation.execute() + + task_id = response.task_id + + video_generate_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path="/proxy/minimax/query/video_generation", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxTaskResultResponse, + query_params={ + "task_id": task_id + } + ), + completed_statuses=["Success"], + failed_statuses=["Fail"], + status_extractor=lambda x: x.status.value, + auth_token=auth_token + ) + task_result = video_generate_operation.execute() + + file_id = task_result.file_id + + file_retrieve_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/files/retrieve", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxFileRetrieveResponse, + query_params={ + "file_id": file_id + } + ), + request=EmptyRequest(), + auth_token=auth_token ) + file_result = file_retrieve_operation.execute() - if response.status_code != 200: - raise Exception(f"API request failed: {response.text}") + file_url = file_result.file.download_url - # Parse response - # response_data = response.json() + logging.info(f"Generated video URL: {file_url}") - # Note: You'll need to implement the actual video handling here - # This is a placeholder return return (None,) # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { "IdeogramTextToImage": IdeogramTextToImage, - "RunwayVideoNode": RunwayVideoNode + "MinimaxVideoNode": MinimaxVideoNode } # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { "IdeogramTextToImage": "Ideogram Text to Image", - "RunwayVideoNode": "Runway Video Generator" + "MinimaxVideoNode": "Minimax Video Generator" } diff --git a/nodes.py b/nodes.py index b6761380b03..73a62d93035 100644 --- a/nodes.py +++ b/nodes.py @@ -2258,6 +2258,10 @@ def init_builtin_extra_nodes(): "nodes_optimalsteps.py", "nodes_hidream.py", "nodes_fresca.py", + ] + + api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes") + api_nodes_files = [ "nodes_api.py", ] @@ -2266,6 +2270,10 @@ def init_builtin_extra_nodes(): if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"): import_failed.append(node_file) + for node_file in api_nodes_files: + if not load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"): + import_failed.append(node_file) + return import_failed diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000000..df06d6dc747 --- /dev/null +++ b/uv.lock @@ -0,0 +1,7 @@ +version = 1 +requires-python = ">=3.9" + +[[package]] +name = "comfyui" +version = "0.3.26" +source = { virtual = "." 
} From 0e32c61481cd5c8cd978e87f58b11b0e8ef73f7a Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 21 Apr 2025 07:39:52 +0800 Subject: [PATCH 08/40] [Minimax] Show video preview and embed workflow in ouput (#7) --- comfy_api_nodes/nodes_api.py | 126 ++++++++++++++++++++++++++++------- 1 file changed, 103 insertions(+), 23 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 217de50c9cd..be2bd59f0c5 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,8 +1,15 @@ +import os +import requests from inspect import cleandoc from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod, PollingOperation, EmptyRequest from comfy_api_nodes.apis.stubs import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest, MinimaxVideoGenerationRequest, MinimaxVideoGenerationResponse, MinimaxFileRetrieveResponse, MinimaxTaskResultResponse, Model +import folder_paths import logging +from comfy.comfy_types.node_typing import FileLocator +import json +import av + def check_auth_token(auth_token): """Verify that an auth token is present.""" @@ -168,25 +175,55 @@ class MinimaxVideoNode: """ Generates videos synchronously based on a prompt, and optional parameters using Minimax's API. """ + def __init__(self): - pass + self.output_dir = folder_paths.get_output_directory() + self.type = "output" @classmethod def INPUT_TYPES(s): return { "required": { - "prompt_text": ("STRING", { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation" - }), - "model": (["T2V-01", "I2V-01-Director", "S2V-01", "I2V-01", "I2V-01-live", "T2V-01"], { - "default": "T2V-01", - "tooltip": "Model to use for video generation" - }), + "prompt_text": ( + "STRING", + { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation", + }, + ), + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "model": ( + [ + "T2V-01", + "I2V-01-Director", + "S2V-01", + "I2V-01", + "I2V-01-live", + "T2V-01", + ], + { + "default": "T2V-01", + "tooltip": "Model to use for video generation", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), }, "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG" + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + "auth_token": "AUTH_TOKEN_COMFY_ORG", }, } @@ -197,7 +234,16 @@ def INPUT_TYPES(s): API_NODE = True OUTPUT_NODE = True - def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): + def generate_video( + self, + prompt_text, + filename_prefix, + seed=0, + model="T2V-01", + prompt=None, + extra_pnginfo=None, + auth_token=None, + ): video_generate_operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/minimax/video_generation", @@ -211,9 +257,9 @@ def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): callback_url=None, first_frame_image=None, subject_reference=None, - prompt_optimizer=None + prompt_optimizer=None, ), - auth_token=auth_token + auth_token=auth_token, ) response = video_generate_operation.execute() @@ -225,14 +271,12 @@ def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): method=HttpMethod.GET, request_model=EmptyRequest, response_model=MinimaxTaskResultResponse, - query_params={ - "task_id": 
task_id - } + query_params={"task_id": task_id}, ), completed_statuses=["Success"], failed_statuses=["Fail"], status_extractor=lambda x: x.status.value, - auth_token=auth_token + auth_token=auth_token, ) task_result = video_generate_operation.execute() @@ -244,12 +288,10 @@ def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): method=HttpMethod.GET, request_model=EmptyRequest, response_model=MinimaxFileRetrieveResponse, - query_params={ - "file_id": file_id - } + query_params={"file_id": file_id}, ), request=EmptyRequest(), - auth_token=auth_token + auth_token=auth_token, ) file_result = file_retrieve_operation.execute() @@ -257,7 +299,45 @@ def generate_video(self, prompt_text, seed=0, model="T2V-01", auth_token=None): logging.info(f"Generated video URL: {file_url}") - return (None,) + # Construct the save path + full_output_folder, filename, counter, subfolder, filename_prefix = ( + folder_paths.get_save_image_path(filename_prefix, self.output_dir) + ) + file_basename = f"{filename}_{counter:05}_.mp4" + save_path = os.path.join(full_output_folder, file_basename) + + # Download the video data + video_response = requests.get(file_url) + video_data = video_response.content + + # Save the video data to a file + with open(save_path, "wb") as video_file: + video_file.write(video_data) + + # Add workflow metadata to the video container + if prompt is not None or extra_pnginfo is not None: + try: + container = av.open(save_path, mode="r+") + if prompt is not None: + container.metadata["prompt"] = json.dumps(prompt) + if extra_pnginfo is not None: + for x in extra_pnginfo: + container.metadata[x] = json.dumps(extra_pnginfo[x]) + container.close() + except Exception as e: + logging.warning(f"Failed to add metadata to video: {e}") + + # Create a FileLocator for the frontend to use for the preview + results: list[FileLocator] = [ + { + "filename": file_basename, + "subfolder": subfolder, + "type": self.type, + } + ] + + return {"ui": {"images": results, "animated": (True,)}} + # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique From c50f4864ca6c0392413f55d2c2ce174ac25139e9 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 11:14:42 -0700 Subject: [PATCH 09/40] [API Nodes] Send empty request body instead of empty dictionary. 
(#8) --- comfy_api_nodes/apis/client.py | 10 +++++++--- comfy_api_nodes/nodes_api.py | 16 ++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 2a98cc3015e..9e0676a6231 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -298,8 +298,8 @@ def execute(self, client: Optional[ApiClient] = None) -> R: verify_ssl=self.verify_ssl, ) - # Convert request model to dict - request_dict = self.request.model_dump(exclude_none=True) + # Convert request model to dict, but use None for EmptyRequest + request_dict = None if isinstance(self.request, EmptyRequest) else self.request.model_dump(exclude_none=True) # Debug log for request logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}") @@ -315,7 +315,11 @@ def execute(self, client: Optional[ApiClient] = None) -> R: ) # Debug log for response - logging.debug(f"[DEBUG] API Response: {json.dumps(resp, indent=2)}") + logging.debug("=" * 50) + logging.debug("[DEBUG] RESPONSE DETAILS:") + logging.debug("[DEBUG] Status Code: 200 (Success)") + logging.debug(f"[DEBUG] Response Body: {json.dumps(resp, indent=2)}") + logging.debug("=" * 50) # Parse and return the response return self._parse_response(resp) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index be2bd59f0c5..740c40a0535 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -10,13 +10,6 @@ import json import av - -def check_auth_token(auth_token): - """Verify that an auth token is present.""" - if auth_token is None: - raise Exception("Please login first to use this node.") - return auth_token - class IdeogramTextToImage(ComfyNodeABC): """ Generates images synchronously based on a given prompt and optional parameters. @@ -281,14 +274,15 @@ def generate_video( task_result = video_generate_operation.execute() file_id = task_result.file_id - + if file_id is None: + raise Exception("Request was not successful. Missing file ID.") file_retrieve_operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/minimax/files/retrieve", method=HttpMethod.GET, request_model=EmptyRequest, response_model=MinimaxFileRetrieveResponse, - query_params={"file_id": file_id}, + query_params={"file_id": int(file_id)}, ), request=EmptyRequest(), auth_token=auth_token, @@ -296,8 +290,10 @@ def generate_video( file_result = file_retrieve_operation.execute() file_url = file_result.file.download_url - + if file_url is None: + raise Exception(f"No video was found in the response. Full response: {file_result.model_dump()}") logging.info(f"Generated video URL: {file_url}") + _, filename = folder_paths.download_url_to_file("comfyapinodes", file_url, folder_paths.get_output_directory()) # Construct the save path full_output_folder, filename, counter, subfolder, filename_prefix = ( From 4902f243d9dcecc628d2dc6470266489f4aaa5ba Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 12:42:26 -0700 Subject: [PATCH 10/40] Fixed: removed function from rebase. --- comfy_api_nodes/nodes_api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 740c40a0535..a2b840c27b1 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -293,7 +293,6 @@ def generate_video( if file_url is None: raise Exception(f"No video was found in the response. 
Full response: {file_result.model_dump()}") logging.info(f"Generated video URL: {file_url}") - _, filename = folder_paths.download_url_to_file("comfyapinodes", file_url, folder_paths.get_output_directory()) # Construct the save path full_output_folder, filename, counter, subfolder, filename_prefix = ( From 05babb9d1d476e1bc676d6f6c3f2f4b4f6e81950 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 21:23:07 -0700 Subject: [PATCH 11/40] Add pydantic. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 5c3a854ce8b..c2360f2bf06 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,4 @@ kornia>=0.7.1 spandrel soundfile av +pydantic From 945ff14be18e8535a4a30a3aa2e2cfbde82313df Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 23:30:39 -0700 Subject: [PATCH 12/40] Remove uv.lock --- uv.lock | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 uv.lock diff --git a/uv.lock b/uv.lock deleted file mode 100644 index df06d6dc747..00000000000 --- a/uv.lock +++ /dev/null @@ -1,7 +0,0 @@ -version = 1 -requires-python = ">=3.9" - -[[package]] -name = "comfyui" -version = "0.3.26" -source = { virtual = "." } From 280997b383055ce32d56aefdb3d00aaefbb8811e Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 23:37:48 -0700 Subject: [PATCH 13/40] Remove polling operations. --- comfy_api_nodes/apis/client.py | 128 ----------------------- comfy_api_nodes/nodes_api.py | 183 +-------------------------------- 2 files changed, 2 insertions(+), 309 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 9e0676a6231..6240a0241be 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -99,21 +99,15 @@ Any, TypeVar, Generic, - Callable, ) from pydantic import BaseModel from enum import Enum -import time import json import requests from urllib.parse import urljoin -# Import models from your generated stubs - T = TypeVar("T", bound=BaseModel) R = TypeVar("R", bound=BaseModel) -P = TypeVar("P", bound=BaseModel) # For poll response - class EmptyRequest(BaseModel): """Base class for empty request bodies. @@ -337,125 +331,3 @@ def _parse_response(self, resp): self.response = self.endpoint.response_model.model_validate(resp) logging.debug(f"[DEBUG] Parsed Response: {self.response}") return self.response - - -class TaskStatus(str, Enum): - """Enum for task status values""" - - COMPLETED = "completed" - FAILED = "failed" - PENDING = "pending" - - -class PollingOperation(Generic[T, R]): - """ - Represents an asynchronous API operation that requires polling for completion. 
- """ - - def __init__( - self, - poll_endpoint: ApiEndpoint[EmptyRequest, R], - completed_statuses: list, - failed_statuses: list, - status_extractor: Callable[[R], str], - request: Optional[T] = None, - api_base: str = "https://stagingapi.comfy.org", - auth_token: Optional[str] = None, - poll_interval: float = 1.0, - ): - self.poll_endpoint = poll_endpoint - self.request = request - self.api_base = api_base - self.auth_token = auth_token - self.poll_interval = poll_interval - - # Polling configuration - self.status_extractor = status_extractor or ( - lambda x: getattr(x, "status", None) - ) - self.completed_statuses = completed_statuses - self.failed_statuses = failed_statuses - - # For storing response data - self.final_response = None - self.error = None - - def execute(self, client: Optional[ApiClient] = None) -> R: - """Execute the polling operation using the provided client. If failed, raise an exception.""" - try: - if client is None: - client = ApiClient( - base_url=self.api_base, - api_key=self.auth_token, - ) - return self._poll_until_complete(client) - except Exception as e: - raise Exception(f"Error during polling: {str(e)}") - - def _check_task_status(self, response: R) -> TaskStatus: - """Check task status using the status extractor function""" - try: - status = self.status_extractor(response) - if status in self.completed_statuses: - return TaskStatus.COMPLETED - elif status in self.failed_statuses: - return TaskStatus.FAILED - return TaskStatus.PENDING - except Exception as e: - logging.debug(f"Error extracting status: {e}") - return TaskStatus.PENDING - - def _poll_until_complete(self, client: ApiClient) -> R: - """Poll until the task is complete""" - poll_count = 0 - while True: - try: - poll_count += 1 - logging.debug(f"[DEBUG] Polling attempt #{poll_count}") - - request_dict = ( - self.request.model_dump(exclude_none=True) - if self.request is not None - else None - ) - - if poll_count == 1: - logging.debug( - f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}" - ) - logging.debug( - f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}" - ) - - # Query task status - resp = client.request( - method=self.poll_endpoint.method.value, - path=self.poll_endpoint.path, - params=self.poll_endpoint.query_params, - json=request_dict, - ) - - # Parse response - response_obj = self.poll_endpoint.response_model.model_validate(resp) - - # Check if task is complete - status = self._check_task_status(response_obj) - logging.debug(f"[DEBUG] Task Status: {status}") - - if status == TaskStatus.COMPLETED: - logging.debug("[DEBUG] Task completed successfully") - self.final_response = response_obj - return self.final_response - elif status == TaskStatus.FAILED: - logging.debug(f"[DEBUG] Task failed: {json.dumps(resp)}") - raise Exception(f"Task failed: {json.dumps(resp)}") - else: - logging.debug("[DEBUG] Task still pending, continuing to poll...") - - # Wait before polling again - logging.debug(f"[DEBUG] Waiting {self.poll_interval} seconds before next poll") - time.sleep(self.poll_interval) - - except Exception as e: - logging.debug(f"[DEBUG] Polling error: {str(e)}") - raise Exception(f"Error while polling: {str(e)}") diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index a2b840c27b1..24bd2f3ff6c 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,14 +1,7 @@ -import os -import requests from inspect import cleandoc from comfy.comfy_types.node_typing import 
ComfyNodeABC, InputTypeDict, IO -from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod, PollingOperation, EmptyRequest -from comfy_api_nodes.apis.stubs import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest, MinimaxVideoGenerationRequest, MinimaxVideoGenerationResponse, MinimaxFileRetrieveResponse, MinimaxTaskResultResponse, Model -import folder_paths -import logging -from comfy.comfy_types.node_typing import FileLocator -import json -import av +from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod +from comfy_api_nodes.apis.stubs import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest class IdeogramTextToImage(ComfyNodeABC): """ @@ -164,185 +157,13 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, # return "" -class MinimaxVideoNode: - """ - Generates videos synchronously based on a prompt, and optional parameters using Minimax's API. - """ - - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt_text": ( - "STRING", - { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation", - }, - ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "model": ( - [ - "T2V-01", - "I2V-01-Director", - "S2V-01", - "I2V-01", - "I2V-01-live", - "T2V-01", - ], - { - "default": "T2V-01", - "tooltip": "Model to use for video generation", - }, - ), - }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "hidden": { - "prompt": "PROMPT", - "extra_pnginfo": "EXTRA_PNGINFO", - "auth_token": "AUTH_TOKEN_COMFY_ORG", - }, - } - - RETURN_TYPES = ("VIDEO",) - DESCRIPTION = "Generates videos from prompts using Minimax's API" - FUNCTION = "generate_video" - CATEGORY = "video" - API_NODE = True - OUTPUT_NODE = True - - def generate_video( - self, - prompt_text, - filename_prefix, - seed=0, - model="T2V-01", - prompt=None, - extra_pnginfo=None, - auth_token=None, - ): - video_generate_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/video_generation", - method=HttpMethod.POST, - request_model=MinimaxVideoGenerationRequest, - response_model=MinimaxVideoGenerationResponse, - ), - request=MinimaxVideoGenerationRequest( - model=Model(model), - prompt=prompt_text, - callback_url=None, - first_frame_image=None, - subject_reference=None, - prompt_optimizer=None, - ), - auth_token=auth_token, - ) - response = video_generate_operation.execute() - - task_id = response.task_id - - video_generate_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path="/proxy/minimax/query/video_generation", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxTaskResultResponse, - query_params={"task_id": task_id}, - ), - completed_statuses=["Success"], - failed_statuses=["Fail"], - status_extractor=lambda x: x.status.value, - auth_token=auth_token, - ) - task_result = video_generate_operation.execute() - - file_id = task_result.file_id - if file_id is None: - raise Exception("Request was not successful. 
Missing file ID.") - file_retrieve_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/files/retrieve", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxFileRetrieveResponse, - query_params={"file_id": int(file_id)}, - ), - request=EmptyRequest(), - auth_token=auth_token, - ) - file_result = file_retrieve_operation.execute() - - file_url = file_result.file.download_url - if file_url is None: - raise Exception(f"No video was found in the response. Full response: {file_result.model_dump()}") - logging.info(f"Generated video URL: {file_url}") - - # Construct the save path - full_output_folder, filename, counter, subfolder, filename_prefix = ( - folder_paths.get_save_image_path(filename_prefix, self.output_dir) - ) - file_basename = f"{filename}_{counter:05}_.mp4" - save_path = os.path.join(full_output_folder, file_basename) - - # Download the video data - video_response = requests.get(file_url) - video_data = video_response.content - - # Save the video data to a file - with open(save_path, "wb") as video_file: - video_file.write(video_data) - - # Add workflow metadata to the video container - if prompt is not None or extra_pnginfo is not None: - try: - container = av.open(save_path, mode="r+") - if prompt is not None: - container.metadata["prompt"] = json.dumps(prompt) - if extra_pnginfo is not None: - for x in extra_pnginfo: - container.metadata[x] = json.dumps(extra_pnginfo[x]) - container.close() - except Exception as e: - logging.warning(f"Failed to add metadata to video: {e}") - - # Create a FileLocator for the frontend to use for the preview - results: list[FileLocator] = [ - { - "filename": file_basename, - "subfolder": subfolder, - "type": self.type, - } - ] - - return {"ui": {"images": results, "animated": (True,)}} - - # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { "IdeogramTextToImage": IdeogramTextToImage, - "MinimaxVideoNode": MinimaxVideoNode } # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { "IdeogramTextToImage": "Ideogram Text to Image", - "MinimaxVideoNode": "Minimax Video Generator" } From 14cf25ee0f3e4cce42b29519bef870cf3d0f3f6d Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Mon, 21 Apr 2025 23:42:20 -0700 Subject: [PATCH 14/40] Update stubs workflow. 
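
Points the scheduled stub generation at the api.comfy.org OpenAPI spec and
fixes the diff path used for change detection. The CI step can presumably be
reproduced locally with the same command the workflow runs (assuming the
datamodel-code-generator package, which provides the datamodel-codegen CLI,
is installed):

    pip install datamodel-code-generator
    datamodel-codegen --use-subclass-enum \
        --url https://api.comfy.org/openapi \
        --output comfy_api_nodes/apis/stubs.py
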
--- .github/workflows/update-api-stubs.yml | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml index 45fbeda9fcf..6f96e35d03e 100644 --- a/.github/workflows/update-api-stubs.yml +++ b/.github/workflows/update-api-stubs.yml @@ -1,11 +1,9 @@ -name: Generate API Models +name: Generate Pydantic Stubs from api.comfy.org on: schedule: - # Run weekly on Monday at 00:00 UTC - cron: '0 0 * * 1' workflow_dispatch: - # Allow manual triggering jobs: generate-models: @@ -27,23 +25,23 @@ jobs: - name: Generate API models run: | - datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis/stubs.py + datamodel-codegen --use-subclass-enum --url https://api.comfy.org/openapi --output comfy_api_nodes/apis/stubs.py - name: Check for changes id: git-check run: | - git diff --exit-code comfy_extras/apis/stubs.py || echo "changes=true" >> $GITHUB_OUTPUT + git diff --exit-code comfy_api_nodes/apis/stubs.py || echo "changes=true" >> $GITHUB_OUTPUT - name: Create Pull Request if: steps.git-check.outputs.changes == 'true' uses: peter-evans/create-pull-request@v5 with: commit-message: 'chore: update API models from OpenAPI spec' - title: 'Update API models from OpenAPI spec' + title: 'Update API models from api.comfy.org' body: | - This PR updates the API models based on the latest OpenAPI specification. + This PR updates the API models based on the latest api.comfy.org OpenAPI specification. - Generated automatically by the Generate API Models workflow. - branch: update-api-models + Generated automatically by the a Github workflow. + branch: update-api-stubs delete-branch: true base: main From 4902edb70869c9c61a27fead43daab9db03c25b8 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 00:17:27 -0700 Subject: [PATCH 15/40] Remove polling comments. --- comfy_api_nodes/apis/client.py | 48 ++-------------------------------- 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 6240a0241be..9603221b2e3 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -1,7 +1,7 @@ import logging """ -API Client Framework for ComfyUI +API Client Framework for api.comfy.org. This module provides a flexible framework for making API requests from ComfyUI nodes. It supports both synchronous and asynchronous API operations with proper type validation. @@ -11,7 +11,6 @@ 1. ApiClient - Handles HTTP requests with authentication and error handling 2. ApiEndpoint - Defines a single HTTP endpoint with its request/response models 3. ApiOperation - Executes a single synchronous API operation -4. PollingOperation - Executes an asynchronous operation with polling for completion Usage Examples: -------------- @@ -47,49 +46,6 @@ ) user_profile = operation.execute(client=api_client) # Returns immediately with the result - -# Example 2: Asynchronous API Operation with Polling -# ------------------------------------------------- -# For an API that starts a task and requires polling for completion: - -# 1. 
Define the endpoints (initial request and polling) -generate_image_endpoint = ApiEndpoint( - path="/v1/images/generate", - method=HttpMethod.POST, - request_model=ImageGenerationRequest, - response_model=TaskCreatedResponse, - query_params=None -) - -check_task_endpoint = ApiEndpoint( - path="/v1/tasks/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=ImageGenerationResult, - query_params=None -) - -# 2. Create the request object -request = ImageGenerationRequest( - prompt="a beautiful sunset over mountains", - width=1024, - height=1024, - num_images=1 -) - -# 3. Create and execute the polling operation -operation = PollingOperation( - initial_endpoint=generate_image_endpoint, - initial_request=request, - poll_endpoint=check_task_endpoint, - task_id_field="task_id", - status_field="status", - completed_statuses=["completed"], - failed_statuses=["failed", "error"] -) - -# This will make the initial request and then poll until completion -result = operation.execute(client=api_client) # Returns the final ImageGenerationResult when done """ from typing import ( @@ -264,7 +220,7 @@ def __init__( self, endpoint: ApiEndpoint[T, R], request: T, - api_base: str = "https://stagingapi.comfy.org", + api_base: str = "https://api.comfy.org", auth_token: Optional[str] = None, timeout: float = 30.0, verify_ssl: bool = True, From 08b797fe80684f26d6f3f14b1461558e28e05f8a Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 01:07:23 -0700 Subject: [PATCH 16/40] Update stubs. --- .github/workflows/update-api-stubs.yml | 4 +- comfy_api_nodes/apis/PixverseController.py | 17 + comfy_api_nodes/apis/PixverseDto.py | 57 + comfy_api_nodes/apis/__init__.py | 1652 ++++++++++++++++++++ comfy_api_nodes/apis/stubs.py | 513 ------ comfy_api_nodes/nodes_api.py | 2 +- 6 files changed, 1729 insertions(+), 516 deletions(-) create mode 100644 comfy_api_nodes/apis/PixverseController.py create mode 100644 comfy_api_nodes/apis/PixverseDto.py delete mode 100644 comfy_api_nodes/apis/stubs.py diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml index 6f96e35d03e..4eda03956ec 100644 --- a/.github/workflows/update-api-stubs.yml +++ b/.github/workflows/update-api-stubs.yml @@ -25,12 +25,12 @@ jobs: - name: Generate API models run: | - datamodel-codegen --use-subclass-enum --url https://api.comfy.org/openapi --output comfy_api_nodes/apis/stubs.py + datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis - name: Check for changes id: git-check run: | - git diff --exit-code comfy_api_nodes/apis/stubs.py || echo "changes=true" >> $GITHUB_OUTPUT + git diff --exit-code comfy_api_nodes/apis || echo "changes=true" >> $GITHUB_OUTPUT - name: Create Pull Request if: steps.git-check.outputs.changes == 'true' diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py new file mode 100644 index 00000000000..2899706f6f7 --- /dev/null +++ b/comfy_api_nodes/apis/PixverseController.py @@ -0,0 +1,17 @@ +# generated by datamodel-codegen: +# filename: https://stagingapi.comfy.org/openapi +# timestamp: 2025-04-22T08:06:44+00:00 + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel + +from . 
import PixverseDto + + +class ResponseData(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[PixverseDto.V2OpenAPII2VResp] = None diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py new file mode 100644 index 00000000000..50d813c056d --- /dev/null +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -0,0 +1,57 @@ +# generated by datamodel-codegen: +# filename: https://stagingapi.comfy.org/openapi +# timestamp: 2025-04-22T08:06:44+00:00 + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, Field, constr + + +class V2OpenAPII2VResp(BaseModel): + video_id: Optional[int] = Field(None, description='Video_id') + + +class V2OpenAPIT2VReq(BaseModel): + aspect_ratio: str = Field( + ..., description='Aspect ratio (16:9, 4:3, 1:1, 3:4, 9:16)', example='16:9' + ) + duration: int = Field( + ..., + description='Video duration (5, 8 seconds, --model=v3.5 only allows 5,8; --quality=1080p does not support 8s)', + example=5, + ) + model: str = Field( + ..., description='Model version (only supports v3.5)', example='v3.5' + ) + motion_mode: Optional[str] = Field( + 'normal', + description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)', + example='normal', + ) + negative_prompt: Optional[constr(max_length=2048)] = Field( + None, description='Negative prompt\n' + ) + prompt: constr(max_length=2048) = Field(..., description='Prompt') + quality: str = Field( + ..., + description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")', + example='540p', + ) + seed: Optional[int] = Field(None, description='Random seed, range: 0 - 2147483647') + style: Optional[str] = Field( + None, + description='Style (effective when model=v3.5, "anime", "3d_animation", "clay", "comic", "cyberpunk") Do not include style parameter unless needed', + example='anime', + ) + template_id: Optional[int] = Field( + None, + description='Template ID (template_id must be activated before use)', + example=302325299692608, + ) + water_mark: Optional[bool] = Field( + False, + description='Watermark (true: add watermark, false: no watermark)', + example=False, + ) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index e69de29bb2d..30466816883 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -0,0 +1,1652 @@ +# generated by datamodel-codegen: +# filename: https://stagingapi.comfy.org/openapi +# timestamp: 2025-04-22T08:06:44+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Literal, Optional, Union +from uuid import UUID + +from pydantic import AnyUrl, BaseModel, Field, confloat, conint, constr + + +class BFLFluxProGenerateRequest(BaseModel): + guidance_scale: Optional[confloat(ge=1.0, le=20.0)] = Field( + None, description='The guidance scale for generation.' + ) + height: conint(ge=64, le=2048) = Field( + ..., description='The height of the image to generate.' + ) + negative_prompt: Optional[str] = Field( + None, description='The negative prompt for image generation.' + ) + num_images: Optional[conint(ge=1, le=4)] = Field( + None, description='The number of images to generate.' + ) + num_inference_steps: Optional[conint(ge=1, le=100)] = Field( + None, description='The number of inference steps.' 
+ ) + prompt: str = Field(..., description='The text prompt for image generation.') + seed: Optional[int] = Field(None, description='The seed value for reproducibility.') + width: conint(ge=64, le=2048) = Field( + ..., description='The width of the image to generate.' + ) + + +class BFLFluxProGenerateResponse(BaseModel): + id: str = Field(..., description='The unique identifier for the generation task.') + polling_url: str = Field(..., description='URL to poll for the generation result.') + + +class ComfyNode(BaseModel): + category: Optional[str] = Field( + None, + description='UI category where the node is listed, used for grouping nodes.', + ) + comfy_node_name: Optional[str] = Field( + None, description='Unique identifier for the node' + ) + deprecated: Optional[bool] = Field( + None, + description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', + ) + description: Optional[str] = Field( + None, description="Brief description of the node's functionality or purpose." + ) + experimental: Optional[bool] = Field( + None, + description='Indicates if the node is experimental, subject to changes or removal.', + ) + function: Optional[str] = Field( + None, description='Name of the entry-point function to execute the node.' + ) + input_types: Optional[str] = Field(None, description='Defines input parameters') + output_is_list: Optional[List[bool]] = Field( + None, description='Boolean values indicating if each output is a list.' + ) + return_names: Optional[str] = Field( + None, description='Names of the outputs for clarity in workflows.' + ) + return_types: Optional[str] = Field( + None, description='Specifies the types of outputs produced by the node.' + ) + + +class ComfyNodeCloudBuildInfo(BaseModel): + build_id: Optional[str] = None + location: Optional[str] = None + project_id: Optional[str] = None + project_number: Optional[str] = None + + +class Customer(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the user was created' + ) + email: Optional[str] = Field(None, description='The email address for this user') + id: str = Field(..., description='The firebase UID of the user') + name: Optional[str] = Field(None, description='The name for this user') + updatedAt: Optional[datetime] = Field( + None, description='The date and time the user was last updated' + ) + + +class Error(BaseModel): + details: Optional[List[str]] = Field( + None, + description='Optional detailed information about the error or hints for resolving it.', + ) + message: Optional[str] = Field( + None, description='A clear and concise description of the error.' + ) + + +class ErrorResponse(BaseModel): + error: str + message: str + + +class GitCommitSummary(BaseModel): + author: Optional[str] = Field(None, description='The author of the commit') + branch_name: Optional[str] = Field( + None, description='The branch where the commit was made' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_name: Optional[str] = Field(None, description='The name of the commit') + status_summary: Optional[Dict[str, str]] = Field( + None, description='A map of operating system to status pairs' + ) + timestamp: Optional[datetime] = Field( + None, description='The timestamp when the commit was made' + ) + + +class ImageRequest(BaseModel): + aspect_ratio: Optional[str] = Field( + None, + description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. 
Defaults to 'ASPECT_1_1' if unspecified.", + ) + color_palette: Optional[Dict[str, Any]] = Field( + None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' + ) + magic_prompt_option: Optional[str] = Field( + None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." + ) + model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") + negative_prompt: Optional[str] = Field( + None, + description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', + ) + num_images: Optional[conint(ge=1, le=8)] = Field( + 1, description='Optional. Number of images to generate (1-8). Defaults to 1.' + ) + prompt: str = Field( + ..., description='Required. The prompt to use to generate the image.' + ) + resolution: Optional[str] = Field( + None, + description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", + ) + seed: Optional[conint(ge=0, le=2147483647)] = Field( + None, description='Optional. A number between 0 and 2147483647.' + ) + style_type: Optional[str] = Field( + None, + description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", + ) + + +class IdeogramGenerateRequest(BaseModel): + image_request: ImageRequest = Field( + ..., description='The image generation request parameters.' + ) + + +class Datum(BaseModel): + is_image_safe: Optional[bool] = Field( + None, description='Indicates whether the image is considered safe.' + ) + prompt: Optional[str] = Field( + None, description='The prompt used to generate this image.' + ) + resolution: Optional[str] = Field( + None, description="The resolution of the generated image (e.g., '1024x1024')." + ) + seed: Optional[int] = Field( + None, description='The seed value used for this generation.' + ) + style_type: Optional[str] = Field( + None, + description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", + ) + url: Optional[str] = Field(None, description='URL to the generated image.') + + +class IdeogramGenerateResponse(BaseModel): + created: Optional[datetime] = Field( + None, description='Timestamp when the generation was created.' + ) + data: Optional[List[Datum]] = Field( + None, description='Array of generated image information.' 
+ ) + + +class Code(Enum): + int_1100 = 1100 + int_1101 = 1101 + int_1102 = 1102 + int_1103 = 1103 + + +class Code1(Enum): + int_1000 = 1000 + int_1001 = 1001 + int_1002 = 1002 + int_1003 = 1003 + int_1004 = 1004 + + +class KlingErrorResponse(BaseModel): + code: int = Field( + ..., description='Error code value as defined in the API documentation' + ) + message: str = Field(..., description='Human-readable error message') + request_id: str = Field( + ..., description='Request ID for tracking and troubleshooting' + ) + + +class AspectRatio(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + + +class Config(BaseModel): + horizontal: Optional[confloat(ge=-10.0, le=10.0)] = None + pan: Optional[confloat(ge=-10.0, le=10.0)] = None + roll: Optional[confloat(ge=-10.0, le=10.0)] = None + tilt: Optional[confloat(ge=-10.0, le=10.0)] = None + vertical: Optional[confloat(ge=-10.0, le=10.0)] = None + zoom: Optional[confloat(ge=-10.0, le=10.0)] = None + + +class Type(str, Enum): + simple = 'simple' + down_back = 'down_back' + forward_up = 'forward_up' + right_turn_forward = 'right_turn_forward' + left_turn_forward = 'left_turn_forward' + + +class CameraControl(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class Duration(str, Enum): + field_5 = 5 + field_10 = 10 + + +class Mode(str, Enum): + std = 'std' + pro = 'pro' + + +class ModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_6 = 'kling-v1-6' + + +class KlingImage2VideoRequest(BaseModel): + aspect_ratio: Optional[AspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[CameraControl] = None + cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Flexibility in video generation' + ) + duration: Optional[Duration] = 5 + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + image_url: Optional[AnyUrl] = Field( + None, description='URL of the image to be used for video generation' + ) + mode: Optional[Mode] = Field('std', description='Video generation mode') + model_name: Optional[ModelName] = Field('kling-v1', description='Model Name') + negative_prompt: Optional[constr(max_length=2500)] = Field( + None, description='Negative text prompt' + ) + prompt: Optional[constr(max_length=2500)] = Field( + None, description='Positive text prompt' + ) + + +class TaskInfo(BaseModel): + external_task_id: Optional[str] = None + + +class Video(BaseModel): + duration: Optional[str] = Field(None, description='Total video duration') + id: Optional[str] = Field(None, description='Generated video ID') + url: Optional[AnyUrl] = Field(None, description='URL for generated video') + + +class TaskResult(BaseModel): + videos: Optional[List[Video]] = None + + +class TaskStatus(str, Enum): + submitted = 'submitted' + processing = 'processing' + succeed = 'succeed' + failed = 'failed' + + +class Data(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingImage2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data] = None + message: Optional[str] = Field(None, 
description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class AspectRatio1(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + field_4_3 = '4:3' + field_3_4 = '3:4' + field_3_2 = '3:2' + field_2_3 = '2:3' + field_21_9 = '21:9' + + +class ImageReference(str, Enum): + subject = 'subject' + face = 'face' + + +class ModelName1(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_5 = 'kling-v1-5' + + +class KlingImageGenerationsRequest(BaseModel): + aspect_ratio: Optional[AspectRatio1] = Field( + '16:9', description='Aspect ratio of the generated images' + ) + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + human_fidelity: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.45, description='Subject reference similarity' + ) + image: Optional[str] = Field( + None, description='Reference Image - Base64 encoded string or image URL' + ) + image_fidelity: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Reference intensity for user-uploaded images' + ) + image_reference: Optional[ImageReference] = Field( + None, description='Image reference type' + ) + model_name: Optional[ModelName1] = Field('kling-v1', description='Model Name') + n: Optional[conint(ge=1, le=9)] = Field(1, description='Number of generated images') + negative_prompt: Optional[constr(max_length=200)] = Field( + None, description='Negative text prompt' + ) + prompt: constr(max_length=500) = Field(..., description='Positive text prompt') + + +class Image(BaseModel): + index: Optional[int] = Field(None, description='Image Number (0-9)') + url: Optional[AnyUrl] = Field(None, description='URL for generated image') + + +class TaskResult1(BaseModel): + images: Optional[List[Image]] = None + + +class Data1(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_result: Optional[TaskResult1] = None + task_status: Optional[TaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingImageGenerationsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data1] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class AspectRatio2(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + + +class CameraControl1(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class ModelName2(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_6 = 'kling-v1-6' + + +class KlingLipSyncRequest(BaseModel): + aspect_ratio: Optional[AspectRatio2] = '16:9' + audio_url: Optional[AnyUrl] = Field( + None, description='URL of the audio to be used for lip-syncing' + ) + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[CameraControl1] = None + cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Flexibility in video generation' + ) + duration: Optional[Duration] = 5 + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + mode: Optional[Mode] = Field('std', description='Video generation mode') + model_name: Optional[ModelName2] = Field('kling-v1', 
description='Model Name') + negative_prompt: Optional[constr(max_length=2500)] = Field( + None, description='Negative text prompt' + ) + prompt: Optional[constr(max_length=2500)] = Field( + None, description='Positive text prompt' + ) + video_url: Optional[AnyUrl] = Field( + None, description='URL of the video to be lip-synced' + ) + + +class TaskResult2(BaseModel): + videos: Optional[List[Video]] = None + + +class Data2(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingLipSyncResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data2] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class Code2(Enum): + int_1200 = 1200 + int_1201 = 1201 + int_1202 = 1202 + int_1203 = 1203 + + +class KlingRequestError(KlingErrorResponse): + code: Optional[Code2] = Field( + None, + description='- 1200: Invalid request parameters\n- 1201: Invalid parameters\n- 1202: Invalid request method\n- 1203: Requested resource does not exist\n', + ) + + +class ResourcePackType(str, Enum): + decreasing_total = 'decreasing_total' + constant_period = 'constant_period' + + +class Status(str, Enum): + toBeOnline = 'toBeOnline' + online = 'online' + expired = 'expired' + runOut = 'runOut' + + +class ResourcePackSubscribeInfo(BaseModel): + effective_time: Optional[int] = Field( + None, description='Effective time, Unix timestamp in ms' + ) + invalid_time: Optional[int] = Field( + None, description='Expiration time, Unix timestamp in ms' + ) + purchase_time: Optional[int] = Field( + None, description='Purchase time, Unix timestamp in ms' + ) + remaining_quantity: Optional[float] = Field( + None, description='Remaining quantity (updated with a 12-hour delay)' + ) + resource_pack_id: Optional[str] = Field(None, description='Resource package ID') + resource_pack_name: Optional[str] = Field(None, description='Resource package name') + resource_pack_type: Optional[ResourcePackType] = Field( + None, + description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', + ) + status: Optional[Status] = Field(None, description='Resource Package Status') + total_quantity: Optional[float] = Field(None, description='Total quantity') + + +class Data3(BaseModel): + code: Optional[int] = Field(None, description='Error code; 0 indicates success') + msg: Optional[str] = Field(None, description='Error information') + resource_pack_subscribe_infos: Optional[List[ResourcePackSubscribeInfo]] = Field( + None, description='Resource package list' + ) + + +class KlingResourcePackageResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code; 0 indicates success') + data: Optional[Data3] = None + message: Optional[str] = Field(None, description='Error information') + request_id: Optional[str] = Field( + None, + description='Request ID, generated by the system, used to track requests and troubleshoot problems', + ) + + +class Code3(Enum): + int_5000 = 5000 + int_5001 = 5001 + int_5002 = 5002 + + +class KlingServerError(KlingErrorResponse): + code: Optional[Code3] = Field( + None, + description='- 5000: Internal server error\n- 5001: 
Service temporarily unavailable\n- 5002: Server internal timeout\n', + ) + + +class Code4(Enum): + int_1300 = 1300 + int_1301 = 1301 + int_1302 = 1302 + int_1303 = 1303 + int_1304 = 1304 + + +class KlingStrategyError(KlingErrorResponse): + code: Optional[Code4] = Field( + None, + description='- 1300: Trigger platform strategy\n- 1301: Trigger content security policy\n- 1302: API request too frequent\n- 1303: Concurrency/QPS exceeds limit\n- 1304: Trigger IP whitelist policy\n', + ) + + +class CameraControl2(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class KlingText2VideoRequest(BaseModel): + aspect_ratio: Optional[AspectRatio2] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[CameraControl2] = None + cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Flexibility in video generation' + ) + duration: Optional[Duration] = 5 + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + mode: Optional[Mode] = Field('std', description='Video generation mode') + model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') + negative_prompt: Optional[constr(max_length=2500)] = Field( + None, description='Negative text prompt' + ) + prompt: Optional[constr(max_length=2500)] = Field( + None, description='Positive text prompt' + ) + + +class TaskResult3(BaseModel): + videos: Optional[List[Video]] = None + + +class Data4(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult3] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingText2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data4] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class CameraControl3(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class KlingVideoEffectsRequest(BaseModel): + aspect_ratio: Optional[AspectRatio2] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[CameraControl3] = None + cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Flexibility in video generation' + ) + duration: Optional[Duration] = 5 + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + mode: Optional[Mode] = Field('std', description='Video generation mode') + model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') + negative_prompt: Optional[constr(max_length=2500)] = Field( + None, description='Negative text prompt' + ) + prompt: Optional[constr(max_length=2500)] = Field( + None, description='Positive text prompt' + ) + video_url: Optional[AnyUrl] = Field( + None, description='URL of the video to be used for effects generation' + ) + + +class TaskResult4(BaseModel): + videos: Optional[List[Video]] = None + + +class Data5(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, 
description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult4] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingVideoEffectsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data5] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class CameraControl4(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class KlingVideoExtendRequest(BaseModel): + aspect_ratio: Optional[AspectRatio2] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[CameraControl4] = None + cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( + 0.5, description='Flexibility in video generation' + ) + duration: Optional[Duration] = 5 + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + mode: Optional[Mode] = Field('std', description='Video generation mode') + model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') + negative_prompt: Optional[constr(max_length=2500)] = Field( + None, description='Negative text prompt' + ) + prompt: Optional[constr(max_length=2500)] = Field( + None, description='Positive text prompt' + ) + video_url: Optional[AnyUrl] = Field( + None, description='URL of the video to be extended' + ) + + +class TaskResult5(BaseModel): + videos: Optional[List[Video]] = None + + +class Data6(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult5] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingVideoExtendResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data6] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class ModelName6(str, Enum): + kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' + kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' + + +class KlingVirtualTryOnRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + cloth_image: Optional[str] = Field( + None, + description='Reference clothing image - Base64 encoded string or image URL', + ) + human_image: str = Field( + ..., description='Reference human image - Base64 encoded string or image URL' + ) + model_name: Optional[ModelName6] = Field( + 'kolors-virtual-try-on-v1', description='Model Name' + ) + + +class Image1(BaseModel): + index: Optional[int] = Field(None, description='Image Number') + url: Optional[AnyUrl] = Field(None, description='URL for generated image') + + +class TaskResult6(BaseModel): + images: Optional[List[Image1]] = None + + +class Data7(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_result: Optional[TaskResult6] = None + task_status: Optional[TaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + 
updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingVirtualTryOnResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data7] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class LumaAspectRatio(str, Enum): + field_1_1 = '1:1' + field_16_9 = '16:9' + field_9_16 = '9:16' + field_4_3 = '4:3' + field_3_4 = '3:4' + field_21_9 = '21:9' + field_9_21 = '9:21' + + +class LumaAssets(BaseModel): + image: Optional[AnyUrl] = Field(None, description='The URL of the image') + progress_video: Optional[AnyUrl] = Field( + None, description='The URL of the progress video' + ) + video: Optional[AnyUrl] = Field(None, description='The URL of the video') + + +class GenerationType(str, Enum): + add_audio = 'add_audio' + + +class LumaAudioGenerationRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the audio' + ) + generation_type: Optional[GenerationType] = 'add_audio' + negative_prompt: Optional[str] = Field( + None, description='The negative prompt of the audio' + ) + prompt: Optional[str] = Field(None, description='The prompt of the audio') + + +class LumaError(BaseModel): + detail: Optional[str] = Field(None, description='The error message') + + +class Type5(str, Enum): + generation = 'generation' + + +class LumaGenerationReference(BaseModel): + id: UUID = Field(..., description='The ID of the generation') + type: Literal['generation'] + + +class GenerationType1(str, Enum): + video = 'video' + + +class LumaGenerationType(str, Enum): + video = 'video' + image = 'image' + + +class GenerationType2(str, Enum): + image = 'image' + + +class LumaImageIdentity(BaseModel): + images: Optional[List[AnyUrl]] = Field( + None, description='The URLs of the image identity' + ) + + +class LumaImageModel(str, Enum): + photon_1 = 'photon-1' + photon_flash_1 = 'photon-flash-1' + + +class LumaImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the image reference' + ) + + +class Type6(str, Enum): + image = 'image' + + +class LumaImageReference(BaseModel): + type: Literal['image'] + url: AnyUrl = Field(..., description='The URL of the image') + + +class LumaKeyframe(BaseModel): + __root__: Union[LumaGenerationReference, LumaImageReference] = Field( + ..., + description='A keyframe can be either a Generation reference, an Image, or a Video', + discriminator='type', + examples=[ + { + 'summary': 'Generation reference', + 'value': { + 'id': '123e4567-e89b-12d3-a456-426614174000', + 'type': 'generation', + }, + }, + { + 'summary': 'Image reference', + 'value': {'type': 'image', 'url': 'https://example.com/image.jpg'}, + }, + ], + ) + + +class LumaKeyframes(BaseModel): + frame0: Optional[LumaKeyframe] = None + frame1: Optional[LumaKeyframe] = None + + +class LumaModifyImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the modify image reference' + ) + + +class LumaState(str, Enum): + queued = 'queued' + dreaming = 'dreaming' + completed = 'completed' + failed = 'failed' + + +class GenerationType3(str, Enum): + upscale_video = 'upscale_video' + + +class LumaVideoModel(str, Enum): + ray_1_6 = 'ray-1-6' + ray_2 = 'ray-2' + ray_flash_2 = 'ray-flash-2' + + +class 
LumaVideoModelOutputDuration1(str, Enum): + field_5s = '5s' + field_9s = '9s' + + +class LumaVideoModelOutputDuration(BaseModel): + __root__: Union[LumaVideoModelOutputDuration1, str] + + +class LumaVideoModelOutputResolution1(str, Enum): + field_540p = '540p' + field_720p = '720p' + field_1080p = '1080p' + field_4k = '4k' + + +class LumaVideoModelOutputResolution(BaseModel): + __root__: Union[LumaVideoModelOutputResolution1, str] + + +class MachineStats(BaseModel): + cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') + disk_capacity: Optional[str] = Field( + None, description='Total disk capacity on the machine.' + ) + gpu_type: Optional[str] = Field( + None, description='The GPU type. eg. NVIDIA Tesla K80' + ) + initial_cpu: Optional[str] = Field( + None, description='Initial CPU available before the job starts.' + ) + initial_disk: Optional[str] = Field( + None, description='Initial disk available before the job starts.' + ) + initial_ram: Optional[str] = Field( + None, description='Initial RAM available before the job starts.' + ) + machine_name: Optional[str] = Field(None, description='Name of the machine.') + memory_capacity: Optional[str] = Field( + None, description='Total memory on the machine.' + ) + os_version: Optional[str] = Field( + None, description='The operating system version. eg. Ubuntu Linux 20.04' + ) + pip_freeze: Optional[str] = Field(None, description='The pip freeze output') + vram_time_series: Optional[Dict[str, Any]] = Field( + None, description='Time series of VRAM usage.' + ) + + +class MinimaxBaseResponse(BaseModel): + status_code: int = Field( + ..., + description='Status code. 0 indicates success, other values indicate errors.', + ) + status_msg: str = Field( + ..., description='Specific error details or success message.' + ) + + +class File(BaseModel): + bytes: Optional[int] = Field(None, description='File size in bytes') + created_at: Optional[int] = Field( + None, description='Unix timestamp when the file was created, in seconds' + ) + download_url: Optional[str] = Field( + None, description='The URL to download the video' + ) + file_id: Optional[int] = Field(None, description='Unique identifier for the file') + filename: Optional[str] = Field(None, description='The name of the file') + purpose: Optional[str] = Field(None, description='The purpose of using the file') + + +class MinimaxFileRetrieveResponse(BaseModel): + base_resp: MinimaxBaseResponse + file: File + + +class Status1(str, Enum): + Queueing = 'Queueing' + Preparing = 'Preparing' + Processing = 'Processing' + Success = 'Success' + Fail = 'Fail' + + +class MinimaxTaskResultResponse(BaseModel): + base_resp: MinimaxBaseResponse + file_id: Optional[str] = Field( + None, + description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', + ) + status: Status1 = Field( + ..., + description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", + ) + task_id: str = Field(..., description='The task ID being queried.') + + +class Model(str, Enum): + T2V_01_Director = 'T2V-01-Director' + I2V_01_Director = 'I2V-01-Director' + S2V_01 = 'S2V-01' + I2V_01 = 'I2V-01' + I2V_01_live = 'I2V-01-live' + T2V_01 = 'T2V-01' + + +class SubjectReferenceItem(BaseModel): + image: Optional[str] = Field( + None, description='URL or base64 encoding of the subject reference image.' 
+ ) + mask: Optional[str] = Field( + None, + description='URL or base64 encoding of the mask for the subject reference image.', + ) + + +class MinimaxVideoGenerationRequest(BaseModel): + callback_url: Optional[str] = Field( + None, + description='Optional. URL to receive real-time status updates about the video generation task.', + ) + first_frame_image: Optional[str] = Field( + None, + description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', + ) + model: Model = Field( + ..., + description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', + ) + prompt: Optional[constr(max_length=2000)] = Field( + None, + description='Description of the video. Should be less than 2000 characters. Supports camera movement instructions in [brackets].', + ) + prompt_optimizer: Optional[bool] = Field( + True, + description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', + ) + subject_reference: Optional[List[SubjectReferenceItem]] = Field( + None, + description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', + ) + + +class MinimaxVideoGenerationResponse(BaseModel): + base_resp: MinimaxBaseResponse + task_id: str = Field( + ..., description='The task ID for the asynchronous video generation task.' + ) + + +class NodeStatus(str, Enum): + NodeStatusActive = 'NodeStatusActive' + NodeStatusDeleted = 'NodeStatusDeleted' + NodeStatusBanned = 'NodeStatusBanned' + + +class NodeVersionStatus(str, Enum): + NodeVersionStatusActive = 'NodeVersionStatusActive' + NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' + NodeVersionStatusBanned = 'NodeVersionStatusBanned' + NodeVersionStatusPending = 'NodeVersionStatusPending' + NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' + + +class NodeVersionUpdateRequest(BaseModel): + changelog: Optional[str] = Field( + None, description='The changelog describing the version changes.' + ) + deprecated: Optional[bool] = Field( + None, description='Whether the version is deprecated.' + ) + + +class PersonalAccessToken(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='[Output Only]The date and time the token was created.' + ) + description: Optional[str] = Field( + None, + description="Optional. A more detailed description of the token's intended use.", + ) + id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') + name: Optional[str] = Field( + None, + description='Required. The name of the token. Can be a simple description.', + ) + token: Optional[str] = Field( + None, + description='[Output Only]. The personal access token. 
Only returned during creation.', + ) + + +class PublisherStatus(str, Enum): + PublisherStatusActive = 'PublisherStatusActive' + PublisherStatusBanned = 'PublisherStatusBanned' + + +class PublisherUser(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + name: Optional[str] = Field(None, description='The name for this user.') + + +class RecraftImageGenerationRequest(BaseModel): + model: str = Field( + ..., description='The model to use for generation (e.g., "recraftv3")' + ) + n: conint(ge=1, le=4) = Field(..., description='The number of images to generate') + prompt: str = Field( + ..., description='The text prompt describing the image to generate' + ) + size: str = Field( + ..., description='The size of the generated image (e.g., "1024x1024")' + ) + style: Optional[str] = Field( + None, + description='The style to apply to the generated image (e.g., "digital_illustration")', + ) + + +class Datum1(BaseModel): + image_id: Optional[str] = Field( + None, description='Unique identifier for the generated image' + ) + url: Optional[str] = Field(None, description='URL to access the generated image') + + +class RecraftImageGenerationResponse(BaseModel): + created: int = Field( + ..., description='Unix timestamp when the generation was created' + ) + credits: int = Field(..., description='Number of credits used for the generation') + data: List[Datum1] = Field(..., description='Array of generated image information') + + +class StorageFile(BaseModel): + file_path: Optional[str] = Field(None, description='Path to the file in storage') + id: Optional[UUID] = Field( + None, description='Unique identifier for the storage file' + ) + public_url: Optional[str] = Field(None, description='Public URL') + + +class StripeAddress(BaseModel): + city: Optional[str] = None + country: Optional[str] = None + line1: Optional[str] = None + line2: Optional[str] = None + postal_code: Optional[str] = None + state: Optional[str] = None + + +class StripeAmountDetails(BaseModel): + tip: Optional[Dict[str, Any]] = None + + +class StripeBillingDetails(BaseModel): + address: Optional[StripeAddress] = None + email: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tax_id: Optional[Any] = None + + +class Checks(BaseModel): + address_line1_check: Optional[Any] = None + address_postal_code_check: Optional[Any] = None + cvc_check: Optional[str] = None + + +class ExtendedAuthorization(BaseModel): + status: Optional[str] = None + + +class IncrementalAuthorization(BaseModel): + status: Optional[str] = None + + +class Multicapture(BaseModel): + status: Optional[str] = None + + +class NetworkToken(BaseModel): + used: Optional[bool] = None + + +class Overcapture(BaseModel): + maximum_amount_capturable: Optional[int] = None + status: Optional[str] = None + + +class StripeCardDetails(BaseModel): + amount_authorized: Optional[int] = None + authorization_code: Optional[Any] = None + brand: Optional[str] = None + checks: Optional[Checks] = None + country: Optional[str] = None + exp_month: Optional[int] = None + exp_year: Optional[int] = None + extended_authorization: Optional[ExtendedAuthorization] = None + fingerprint: Optional[str] = None + funding: Optional[str] = None + incremental_authorization: Optional[IncrementalAuthorization] = None + installments: Optional[Any] = None + last4: Optional[str] = None + mandate: Optional[Any] = None + multicapture: Optional[Multicapture] = None + 
network: Optional[str] = None + network_token: Optional[NetworkToken] = None + network_transaction_id: Optional[str] = None + overcapture: Optional[Overcapture] = None + regulated_status: Optional[str] = None + three_d_secure: Optional[Any] = None + wallet: Optional[Any] = None + + +class Object(str, Enum): + charge = 'charge' + + +class Object1(str, Enum): + event = 'event' + + +class Type7(str, Enum): + payment_intent_succeeded = 'payment_intent.succeeded' + + +class StripeOutcome(BaseModel): + advice_code: Optional[Any] = None + network_advice_code: Optional[Any] = None + network_decline_code: Optional[Any] = None + network_status: Optional[str] = None + reason: Optional[Any] = None + risk_level: Optional[str] = None + risk_score: Optional[int] = None + seller_message: Optional[str] = None + type: Optional[str] = None + + +class Object2(str, Enum): + payment_intent = 'payment_intent' + + +class StripePaymentMethodDetails(BaseModel): + card: Optional[StripeCardDetails] = None + type: Optional[str] = None + + +class Card(BaseModel): + installments: Optional[Any] = None + mandate_options: Optional[Any] = None + network: Optional[Any] = None + request_three_d_secure: Optional[str] = None + + +class StripePaymentMethodOptions(BaseModel): + card: Optional[Card] = None + + +class StripeRefundList(BaseModel): + data: Optional[List[Dict[str, Any]]] = None + has_more: Optional[bool] = None + object: Optional[str] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class StripeRequestInfo(BaseModel): + id: Optional[str] = None + idempotency_key: Optional[str] = None + + +class StripeShipping(BaseModel): + address: Optional[StripeAddress] = None + carrier: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tracking_number: Optional[str] = None + + +class User(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + isAdmin: Optional[bool] = Field( + None, description='Indicates if the user has admin privileges.' + ) + isApproved: Optional[bool] = Field( + None, description='Indicates if the user is approved.' 
+ ) + name: Optional[str] = Field(None, description='The name for this user.') + + +class Image2(BaseModel): + bytesBase64Encoded: str + gcsUri: Optional[str] = None + mimeType: Optional[str] = None + + +class Image3(BaseModel): + bytesBase64Encoded: Optional[str] = None + gcsUri: str + mimeType: Optional[str] = None + + +class Instance(BaseModel): + image: Optional[Union[Image2, Image3]] = Field( + None, description='Optional image to guide video generation' + ) + prompt: str = Field(..., description='Text description of the video') + + +class PersonGeneration(str, Enum): + ALLOW = 'ALLOW' + BLOCK = 'BLOCK' + + +class Parameters(BaseModel): + aspectRatio: Optional[str] = Field(None, example='16:9') + durationSeconds: Optional[int] = None + enhancePrompt: Optional[bool] = None + negativePrompt: Optional[str] = None + personGeneration: Optional[PersonGeneration] = None + sampleCount: Optional[int] = None + seed: Optional[int] = None + storageUri: Optional[str] = Field( + None, description='Optional Cloud Storage URI to upload the video' + ) + + +class VeoRequestBody(BaseModel): + instances: Optional[List[Instance]] = None + parameters: Optional[Parameters] = None + + +class WorkflowRunStatus(str, Enum): + WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' + WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' + WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' + + +class ActionJobResult(BaseModel): + action_job_id: Optional[str] = Field( + None, description='Identifier of the job this result belongs to' + ) + action_run_id: Optional[str] = Field( + None, description='Identifier of the run this result belongs to' + ) + author: Optional[str] = Field(None, description='The author of the commit') + avg_vram: Optional[int] = Field( + None, description='The average VRAM used by the job' + ) + branch_name: Optional[str] = Field( + None, description='Name of the relevant git branch' + ) + comfy_run_flags: Optional[str] = Field( + None, description='The comfy run flags. E.g. `--low-vram`' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_id: Optional[str] = Field(None, description='The ID of the commit') + commit_message: Optional[str] = Field(None, description='The message of the commit') + commit_time: Optional[int] = Field( + None, description='The Unix timestamp when the commit was made' + ) + cuda_version: Optional[str] = Field(None, description='CUDA version used') + end_time: Optional[int] = Field( + None, description='The end time of the job as a Unix timestamp.' + ) + git_repo: Optional[str] = Field(None, description='The repository name') + id: Optional[UUID] = Field(None, description='Unique identifier for the job result') + job_trigger_user: Optional[str] = Field( + None, description='The user who triggered the job.' + ) + machine_stats: Optional[MachineStats] = None + operating_system: Optional[str] = Field(None, description='Operating system used') + peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') + pr_number: Optional[str] = Field(None, description='The pull request number') + python_version: Optional[str] = Field(None, description='PyTorch version used') + pytorch_version: Optional[str] = Field(None, description='PyTorch version used') + start_time: Optional[int] = Field( + None, description='The start time of the job as a Unix timestamp.' 
+ ) + status: Optional[WorkflowRunStatus] = None + storage_file: Optional[StorageFile] = None + workflow_name: Optional[str] = Field(None, description='Name of the workflow') + + +class KlingAccountError(KlingErrorResponse): + code: Optional[Code] = Field( + None, + description='- 1100: Account exception\n- 1101: Account in arrears (postpaid scenario)\n- 1102: Resource pack depleted or expired (prepaid scenario)\n- 1103: Unauthorized access to requested resource\n', + ) + + +class KlingAuthenticationError(KlingErrorResponse): + code: Optional[Code1] = Field( + None, + description='- 1000: Authentication failed\n- 1001: Authorization is empty\n- 1002: Authorization is invalid\n- 1003: Authorization is not yet valid\n- 1004: Authorization has expired\n', + ) + + +class LumaGenerationRequest(BaseModel): + aspect_ratio: Optional[LumaAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback URL of the generation, a POST request with Generation object will be sent to the callback URL when the generation is dreaming, completed, or failed', + ) + duration: Optional[LumaVideoModelOutputDuration] = None + generation_type: Optional[GenerationType1] = 'video' + keyframes: Optional[LumaKeyframes] = None + loop: Optional[bool] = Field(None, description='Whether to loop the video') + model: Optional[LumaVideoModel] = 'ray-1-6' + prompt: Optional[str] = Field(None, description='The prompt of the generation') + resolution: Optional[LumaVideoModelOutputResolution] = None + + +class CharacterRef(BaseModel): + identity0: Optional[LumaImageIdentity] = None + + +class LumaImageGenerationRequest(BaseModel): + aspect_ratio: Optional[LumaAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the generation' + ) + character_ref: Optional[CharacterRef] = None + generation_type: Optional[GenerationType2] = 'image' + image_ref: Optional[List[LumaImageRef]] = None + model: Optional[LumaImageModel] = 'photon-1' + modify_image_ref: Optional[LumaModifyImageRef] = None + prompt: Optional[str] = Field(None, description='The prompt of the generation') + style_ref: Optional[List[LumaImageRef]] = None + + +class LumaUpscaleVideoGenerationRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the upscale' + ) + generation_type: Optional[GenerationType3] = 'upscale_video' + resolution: Optional[LumaVideoModelOutputResolution] = None + + +class NodeVersion(BaseModel): + changelog: Optional[str] = Field( + None, description='Summary of changes made in this version' + ) + comfy_node_extract_status: Optional[str] = Field( + None, description='The status of comfy node extraction process.' + ) + createdAt: Optional[datetime] = Field( + None, description='The date and time the version was created.' + ) + dependencies: Optional[List[str]] = Field( + None, description='A list of pip dependencies required by the node.' + ) + deprecated: Optional[bool] = Field( + None, description='Indicates if this version is deprecated.' + ) + downloadUrl: Optional[str] = Field( + None, description='[Output Only] URL to download this version of the node' + ) + id: Optional[str] = None + node_id: Optional[str] = Field( + None, description='The unique identifier of the node.' + ) + status: Optional[NodeVersionStatus] = None + status_reason: Optional[str] = Field( + None, description='The reason for the status change.' 
+ ) + version: Optional[str] = Field( + None, + description='The version identifier, following semantic versioning. Must be unique for the node.', + ) + + +class PublisherMember(BaseModel): + id: Optional[str] = Field( + None, description='The unique identifier for the publisher member.' + ) + role: Optional[str] = Field( + None, description='The role of the user in the publisher.' + ) + user: Optional[PublisherUser] = None + + +class StripeCharge(BaseModel): + amount: Optional[int] = None + amount_captured: Optional[int] = None + amount_refunded: Optional[int] = None + application: Optional[str] = None + application_fee: Optional[str] = None + application_fee_amount: Optional[int] = None + balance_transaction: Optional[str] = None + billing_details: Optional[StripeBillingDetails] = None + calculated_statement_descriptor: Optional[str] = None + captured: Optional[bool] = None + created: Optional[int] = None + currency: Optional[str] = None + customer: Optional[str] = None + description: Optional[str] = None + destination: Optional[Any] = None + dispute: Optional[Any] = None + disputed: Optional[bool] = None + failure_balance_transaction: Optional[Any] = None + failure_code: Optional[Any] = None + failure_message: Optional[Any] = None + fraud_details: Optional[Dict[str, Any]] = None + id: Optional[str] = None + invoice: Optional[Any] = None + livemode: Optional[bool] = None + metadata: Optional[Dict[str, Any]] = None + object: Optional[Object] = None + on_behalf_of: Optional[Any] = None + order: Optional[Any] = None + outcome: Optional[StripeOutcome] = None + paid: Optional[bool] = None + payment_intent: Optional[str] = None + payment_method: Optional[str] = None + payment_method_details: Optional[StripePaymentMethodDetails] = None + radar_options: Optional[Dict[str, Any]] = None + receipt_email: Optional[str] = None + receipt_number: Optional[str] = None + receipt_url: Optional[str] = None + refunded: Optional[bool] = None + refunds: Optional[StripeRefundList] = None + review: Optional[Any] = None + shipping: Optional[StripeShipping] = None + source: Optional[Any] = None + source_transfer: Optional[Any] = None + statement_descriptor: Optional[Any] = None + statement_descriptor_suffix: Optional[Any] = None + status: Optional[str] = None + transfer_data: Optional[Any] = None + transfer_group: Optional[Any] = None + + +class StripeChargeList(BaseModel): + data: Optional[List[StripeCharge]] = None + has_more: Optional[bool] = None + object: Optional[str] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class StripePaymentIntent(BaseModel): + amount: Optional[int] = None + amount_capturable: Optional[int] = None + amount_details: Optional[StripeAmountDetails] = None + amount_received: Optional[int] = None + application: Optional[str] = None + application_fee_amount: Optional[int] = None + automatic_payment_methods: Optional[Any] = None + canceled_at: Optional[int] = None + cancellation_reason: Optional[str] = None + capture_method: Optional[str] = None + charges: Optional[StripeChargeList] = None + client_secret: Optional[str] = None + confirmation_method: Optional[str] = None + created: Optional[int] = None + currency: Optional[str] = None + customer: Optional[str] = None + description: Optional[str] = None + id: Optional[str] = None + invoice: Optional[str] = None + last_payment_error: Optional[Any] = None + latest_charge: Optional[str] = None + livemode: Optional[bool] = None + metadata: Optional[Dict[str, Any]] = None + next_action: Optional[Any] = None + object: 
Optional[Object2] = None + on_behalf_of: Optional[Any] = None + payment_method: Optional[str] = None + payment_method_configuration_details: Optional[Any] = None + payment_method_options: Optional[StripePaymentMethodOptions] = None + payment_method_types: Optional[List[str]] = None + processing: Optional[Any] = None + receipt_email: Optional[str] = None + review: Optional[Any] = None + setup_future_usage: Optional[Any] = None + shipping: Optional[StripeShipping] = None + source: Optional[Any] = None + statement_descriptor: Optional[Any] = None + statement_descriptor_suffix: Optional[Any] = None + status: Optional[str] = None + transfer_data: Optional[Any] = None + transfer_group: Optional[Any] = None + + +class LumaGeneration(BaseModel): + assets: Optional[LumaAssets] = None + created_at: Optional[datetime] = Field( + None, description='The date and time when the generation was created' + ) + failure_reason: Optional[str] = Field( + None, description='The reason for the state of the generation' + ) + generation_type: Optional[LumaGenerationType] = None + id: Optional[UUID] = Field(None, description='The ID of the generation') + model: Optional[str] = Field(None, description='The model used for the generation') + request: Optional[ + Union[ + LumaGenerationRequest, + LumaImageGenerationRequest, + LumaUpscaleVideoGenerationRequest, + LumaAudioGenerationRequest, + ] + ] = Field(None, description='The request of the generation') + state: Optional[LumaState] = None + + +class Publisher(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the publisher was created.' + ) + description: Optional[str] = None + id: Optional[str] = Field( + None, + description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", + ) + logo: Optional[str] = Field(None, description="URL to the publisher's logo.") + members: Optional[List[PublisherMember]] = Field( + None, description='A list of members in the publisher.' + ) + name: Optional[str] = None + source_code_repo: Optional[str] = None + status: Optional[PublisherStatus] = None + support: Optional[str] = None + website: Optional[str] = None + + +class Data8(BaseModel): + object: Optional[StripePaymentIntent] = None + + +class StripeEvent(BaseModel): + api_version: Optional[str] = None + created: Optional[int] = None + data: Data8 + id: str + livemode: Optional[bool] = None + object: Object1 + pending_webhooks: Optional[int] = None + request: Optional[StripeRequestInfo] = None + type: Type7 + + +class Node(BaseModel): + author: Optional[str] = None + category: Optional[str] = Field(None, description='The category of the node.') + description: Optional[str] = None + downloads: Optional[int] = Field( + None, description='The number of downloads of the node.' + ) + icon: Optional[str] = Field(None, description="URL to the node's icon.") + id: Optional[str] = Field(None, description='The unique identifier of the node.') + latest_version: Optional[NodeVersion] = None + license: Optional[str] = Field( + None, description="The path to the LICENSE file in the node's repository." 
+ ) + name: Optional[str] = Field(None, description='The display name of the node.') + publisher: Optional[Publisher] = None + rating: Optional[float] = Field(None, description='The average rating of the node.') + repository: Optional[str] = Field(None, description="URL to the node's repository.") + status: Optional[NodeStatus] = None + status_detail: Optional[str] = Field( + None, description='The status detail of the node.' + ) + tags: Optional[List[str]] = None + translations: Optional[Dict[str, Dict[str, Any]]] = None diff --git a/comfy_api_nodes/apis/stubs.py b/comfy_api_nodes/apis/stubs.py deleted file mode 100644 index d1da6a5ab36..00000000000 --- a/comfy_api_nodes/apis/stubs.py +++ /dev/null @@ -1,513 +0,0 @@ -# generated by datamodel-codegen: -# filename: http://localhost:8080/openapi -# timestamp: 2025-04-18T21:35:21+00:00 - -from __future__ import annotations - -from datetime import datetime -from enum import Enum -from typing import Any, Dict, List, Optional -from uuid import UUID - -from pydantic import BaseModel, Field, conint, constr - - -class ComfyNode(BaseModel): - category: Optional[str] = Field( - None, - description='UI category where the node is listed, used for grouping nodes.', - ) - comfy_node_name: Optional[str] = Field( - None, description='Unique identifier for the node' - ) - deprecated: Optional[bool] = Field( - None, - description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', - ) - description: Optional[str] = Field( - None, description="Brief description of the node's functionality or purpose." - ) - experimental: Optional[bool] = Field( - None, - description='Indicates if the node is experimental, subject to changes or removal.', - ) - function: Optional[str] = Field( - None, description='Name of the entry-point function to execute the node.' - ) - input_types: Optional[str] = Field(None, description='Defines input parameters') - output_is_list: Optional[List[bool]] = Field( - None, description='Boolean values indicating if each output is a list.' - ) - return_names: Optional[str] = Field( - None, description='Names of the outputs for clarity in workflows.' - ) - return_types: Optional[str] = Field( - None, description='Specifies the types of outputs produced by the node.' - ) - - -class ComfyNodeCloudBuildInfo(BaseModel): - build_id: Optional[str] = None - location: Optional[str] = None - project_id: Optional[str] = None - project_number: Optional[str] = None - - -class Customer(BaseModel): - createdAt: Optional[datetime] = Field( - None, description='The date and time the user was created' - ) - email: Optional[str] = Field(None, description='The email address for this user') - id: str = Field(..., description='The firebase UID of the user') - name: Optional[str] = Field(None, description='The name for this user') - updatedAt: Optional[datetime] = Field( - None, description='The date and time the user was last updated' - ) - - -class Error(BaseModel): - details: Optional[List[str]] = Field( - None, - description='Optional detailed information about the error or hints for resolving it.', - ) - message: Optional[str] = Field( - None, description='A clear and concise description of the error.' 
- ) - - -class ErrorResponse(BaseModel): - error: str - message: str - - -class GitCommitSummary(BaseModel): - author: Optional[str] = Field(None, description='The author of the commit') - branch_name: Optional[str] = Field( - None, description='The branch where the commit was made' - ) - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_name: Optional[str] = Field(None, description='The name of the commit') - status_summary: Optional[Dict[str, str]] = Field( - None, description='A map of operating system to status pairs' - ) - timestamp: Optional[datetime] = Field( - None, description='The timestamp when the commit was made' - ) - - -class ImageRequest(BaseModel): - aspect_ratio: Optional[str] = Field( - None, - description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.", - ) - color_palette: Optional[Dict[str, Any]] = Field( - None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' - ) - magic_prompt_option: Optional[str] = Field( - None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." - ) - model: Optional[str] = Field( - None, - description="Optional. The model used (e.g., 'V_2', 'V_2A_TURBO'). Defaults to 'V_2' if unspecified.", - ) - negative_prompt: Optional[str] = Field( - None, - description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', - ) - num_images: Optional[conint(ge=1, le=8)] = Field( - 1, description='Optional. Number of images to generate (1-8). Defaults to 1.' - ) - prompt: str = Field( - ..., description='Required. The prompt to use to generate the image.' - ) - resolution: Optional[str] = Field( - None, - description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", - ) - seed: Optional[conint(ge=0, le=2147483647)] = Field( - None, description='Optional. A number between 0 and 2147483647.' - ) - style_type: Optional[str] = Field( - None, - description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", - ) - - -class IdeogramGenerateRequest(BaseModel): - image_request: ImageRequest = Field( - ..., description='The image generation request parameters.' - ) - - -class Datum(BaseModel): - is_image_safe: Optional[bool] = Field( - None, description='Indicates whether the image is considered safe.' - ) - prompt: Optional[str] = Field( - None, description='The prompt used to generate this image.' - ) - resolution: Optional[str] = Field( - None, description="The resolution of the generated image (e.g., '1024x1024')." - ) - seed: Optional[int] = Field( - None, description='The seed value used for this generation.' - ) - style_type: Optional[str] = Field( - None, - description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", - ) - url: Optional[str] = Field(None, description='URL to the generated image.') - - -class IdeogramGenerateResponse(BaseModel): - created: Optional[datetime] = Field( - None, description='Timestamp when the generation was created.' - ) - data: Optional[List[Datum]] = Field( - None, description='Array of generated image information.' - ) - - -class MachineStats(BaseModel): - cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') - disk_capacity: Optional[str] = Field( - None, description='Total disk capacity on the machine.' 
- ) - gpu_type: Optional[str] = Field( - None, description='The GPU type. eg. NVIDIA Tesla K80' - ) - initial_cpu: Optional[str] = Field( - None, description='Initial CPU available before the job starts.' - ) - initial_disk: Optional[str] = Field( - None, description='Initial disk available before the job starts.' - ) - initial_ram: Optional[str] = Field( - None, description='Initial RAM available before the job starts.' - ) - machine_name: Optional[str] = Field(None, description='Name of the machine.') - memory_capacity: Optional[str] = Field( - None, description='Total memory on the machine.' - ) - os_version: Optional[str] = Field( - None, description='The operating system version. eg. Ubuntu Linux 20.04' - ) - pip_freeze: Optional[str] = Field(None, description='The pip freeze output') - vram_time_series: Optional[Dict[str, Any]] = Field( - None, description='Time series of VRAM usage.' - ) - - -class MinimaxBaseResponse(BaseModel): - status_code: int = Field( - ..., - description='Status code. 0 indicates success, other values indicate errors.', - ) - status_msg: str = Field( - ..., description='Specific error details or success message.' - ) - - -class File(BaseModel): - bytes: Optional[int] = Field(None, description='File size in bytes') - created_at: Optional[int] = Field( - None, description='Unix timestamp when the file was created, in seconds' - ) - download_url: Optional[str] = Field( - None, description='The URL to download the video' - ) - file_id: Optional[int] = Field(None, description='Unique identifier for the file') - filename: Optional[str] = Field(None, description='The name of the file') - purpose: Optional[str] = Field(None, description='The purpose of using the file') - - -class MinimaxFileRetrieveResponse(BaseModel): - base_resp: MinimaxBaseResponse - file: File - - -class Status(str, Enum): - Queueing = 'Queueing' - Preparing = 'Preparing' - Processing = 'Processing' - Success = 'Success' - Fail = 'Fail' - - -class MinimaxTaskResultResponse(BaseModel): - base_resp: MinimaxBaseResponse - file_id: Optional[str] = Field( - None, - description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', - ) - status: Status = Field( - ..., - description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", - ) - task_id: str = Field(..., description='The task ID being queried.') - - -class Model(str, Enum): - T2V_01_Director = 'T2V-01-Director' - I2V_01_Director = 'I2V-01-Director' - S2V_01 = 'S2V-01' - I2V_01 = 'I2V-01' - I2V_01_live = 'I2V-01-live' - T2V_01 = 'T2V-01' - - -class SubjectReferenceItem(BaseModel): - image: Optional[str] = Field( - None, description='URL or base64 encoding of the subject reference image.' - ) - mask: Optional[str] = Field( - None, - description='URL or base64 encoding of the mask for the subject reference image.', - ) - - -class MinimaxVideoGenerationRequest(BaseModel): - callback_url: Optional[str] = Field( - None, - description='Optional. URL to receive real-time status updates about the video generation task.', - ) - first_frame_image: Optional[str] = Field( - None, - description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', - ) - model: Model = Field( - ..., - description='Required. ID of model. 
Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', - ) - prompt: Optional[constr(max_length=2000)] = Field( - None, - description='Description of the video. Should be less than 2000 characters. Supports camera movement instructions in [brackets].', - ) - prompt_optimizer: Optional[bool] = Field( - True, - description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', - ) - subject_reference: Optional[List[SubjectReferenceItem]] = Field( - None, - description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', - ) - - -class MinimaxVideoGenerationResponse(BaseModel): - base_resp: MinimaxBaseResponse - task_id: str = Field( - ..., description='The task ID for the asynchronous video generation task.' - ) - - -class NodeStatus(str, Enum): - NodeStatusActive = 'NodeStatusActive' - NodeStatusDeleted = 'NodeStatusDeleted' - NodeStatusBanned = 'NodeStatusBanned' - - -class NodeVersionStatus(str, Enum): - NodeVersionStatusActive = 'NodeVersionStatusActive' - NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' - NodeVersionStatusBanned = 'NodeVersionStatusBanned' - NodeVersionStatusPending = 'NodeVersionStatusPending' - NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' - - -class NodeVersionUpdateRequest(BaseModel): - changelog: Optional[str] = Field( - None, description='The changelog describing the version changes.' - ) - deprecated: Optional[bool] = Field( - None, description='Whether the version is deprecated.' - ) - - -class PersonalAccessToken(BaseModel): - createdAt: Optional[datetime] = Field( - None, description='[Output Only]The date and time the token was created.' - ) - description: Optional[str] = Field( - None, - description="Optional. A more detailed description of the token's intended use.", - ) - id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') - name: Optional[str] = Field( - None, - description='Required. The name of the token. Can be a simple description.', - ) - token: Optional[str] = Field( - None, - description='[Output Only]. The personal access token. Only returned during creation.', - ) - - -class PublisherStatus(str, Enum): - PublisherStatusActive = 'PublisherStatusActive' - PublisherStatusBanned = 'PublisherStatusBanned' - - -class PublisherUser(BaseModel): - email: Optional[str] = Field(None, description='The email address for this user.') - id: Optional[str] = Field(None, description='The unique id for this user.') - name: Optional[str] = Field(None, description='The name for this user.') - - -class StorageFile(BaseModel): - file_path: Optional[str] = Field(None, description='Path to the file in storage') - id: Optional[UUID] = Field( - None, description='Unique identifier for the storage file' - ) - public_url: Optional[str] = Field(None, description='Public URL') - - -class User(BaseModel): - email: Optional[str] = Field(None, description='The email address for this user.') - id: Optional[str] = Field(None, description='The unique id for this user.') - isAdmin: Optional[bool] = Field( - None, description='Indicates if the user has admin privileges.' - ) - isApproved: Optional[bool] = Field( - None, description='Indicates if the user is approved.' 
- ) - name: Optional[str] = Field(None, description='The name for this user.') - - -class WorkflowRunStatus(str, Enum): - WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' - WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' - WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' - - -class ActionJobResult(BaseModel): - action_job_id: Optional[str] = Field( - None, description='Identifier of the job this result belongs to' - ) - action_run_id: Optional[str] = Field( - None, description='Identifier of the run this result belongs to' - ) - author: Optional[str] = Field(None, description='The author of the commit') - avg_vram: Optional[int] = Field( - None, description='The average VRAM used by the job' - ) - branch_name: Optional[str] = Field( - None, description='Name of the relevant git branch' - ) - comfy_run_flags: Optional[str] = Field( - None, description='The comfy run flags. E.g. `--low-vram`' - ) - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_id: Optional[str] = Field(None, description='The ID of the commit') - commit_message: Optional[str] = Field(None, description='The message of the commit') - commit_time: Optional[int] = Field( - None, description='The Unix timestamp when the commit was made' - ) - cuda_version: Optional[str] = Field(None, description='CUDA version used') - end_time: Optional[int] = Field( - None, description='The end time of the job as a Unix timestamp.' - ) - git_repo: Optional[str] = Field(None, description='The repository name') - id: Optional[UUID] = Field(None, description='Unique identifier for the job result') - job_trigger_user: Optional[str] = Field( - None, description='The user who triggered the job.' - ) - machine_stats: Optional[MachineStats] = None - operating_system: Optional[str] = Field(None, description='Operating system used') - peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') - pr_number: Optional[str] = Field(None, description='The pull request number') - python_version: Optional[str] = Field(None, description='PyTorch version used') - pytorch_version: Optional[str] = Field(None, description='PyTorch version used') - start_time: Optional[int] = Field( - None, description='The start time of the job as a Unix timestamp.' - ) - status: Optional[WorkflowRunStatus] = None - storage_file: Optional[StorageFile] = None - workflow_name: Optional[str] = Field(None, description='Name of the workflow') - - -class NodeVersion(BaseModel): - changelog: Optional[str] = Field( - None, description='Summary of changes made in this version' - ) - comfy_node_extract_status: Optional[str] = Field( - None, description='The status of comfy node extraction process.' - ) - createdAt: Optional[datetime] = Field( - None, description='The date and time the version was created.' - ) - dependencies: Optional[List[str]] = Field( - None, description='A list of pip dependencies required by the node.' - ) - deprecated: Optional[bool] = Field( - None, description='Indicates if this version is deprecated.' - ) - downloadUrl: Optional[str] = Field( - None, description='[Output Only] URL to download this version of the node' - ) - id: Optional[str] = None - node_id: Optional[str] = Field( - None, description='The unique identifier of the node.' - ) - status: Optional[NodeVersionStatus] = None - status_reason: Optional[str] = Field( - None, description='The reason for the status change.' 
- ) - version: Optional[str] = Field( - None, - description='The version identifier, following semantic versioning. Must be unique for the node.', - ) - - -class PublisherMember(BaseModel): - id: Optional[str] = Field( - None, description='The unique identifier for the publisher member.' - ) - role: Optional[str] = Field( - None, description='The role of the user in the publisher.' - ) - user: Optional[PublisherUser] = None - - -class Publisher(BaseModel): - createdAt: Optional[datetime] = Field( - None, description='The date and time the publisher was created.' - ) - description: Optional[str] = None - id: Optional[str] = Field( - None, - description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", - ) - logo: Optional[str] = Field(None, description="URL to the publisher's logo.") - members: Optional[List[PublisherMember]] = Field( - None, description='A list of members in the publisher.' - ) - name: Optional[str] = None - source_code_repo: Optional[str] = None - status: Optional[PublisherStatus] = None - support: Optional[str] = None - website: Optional[str] = None - - -class Node(BaseModel): - author: Optional[str] = None - category: Optional[str] = Field(None, description='The category of the node.') - description: Optional[str] = None - downloads: Optional[int] = Field( - None, description='The number of downloads of the node.' - ) - icon: Optional[str] = Field(None, description="URL to the node's icon.") - id: Optional[str] = Field(None, description='The unique identifier of the node.') - latest_version: Optional[NodeVersion] = None - license: Optional[str] = Field( - None, description="The path to the LICENSE file in the node's repository." - ) - name: Optional[str] = Field(None, description='The display name of the node.') - publisher: Optional[Publisher] = None - rating: Optional[float] = Field(None, description='The average rating of the node.') - repository: Optional[str] = Field(None, description="URL to the node's repository.") - status: Optional[NodeStatus] = None - status_detail: Optional[str] = Field( - None, description='The status detail of the node.' - ) - tags: Optional[List[str]] = None - translations: Optional[Dict[str, Dict[str, Any]]] = None diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 24bd2f3ff6c..b5b3ad5b822 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,7 +1,7 @@ from inspect import cleandoc from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod -from comfy_api_nodes.apis.stubs import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest +from comfy_api_nodes.apis import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest class IdeogramTextToImage(ComfyNodeABC): """ From 824c87a106b0b0d98bce0fb3e4f5c6868db9d86e Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 01:58:55 -0700 Subject: [PATCH 17/40] Use pydantic v2. 
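A minimal sketch (not part of the generated code) of how the v1 -> v2 switch
changes call sites, assuming the regenerated models in this patch: root
wrappers such as LumaKeyframe now subclass RootModel, so the wrapped value is
read from .root instead of .__root__.

    from comfy_api_nodes.apis import LumaKeyframe

    # Validate one of the documented keyframe examples.
    kf = LumaKeyframe.model_validate(
        {"type": "image", "url": "https://example.com/image.jpg"}
    )
    print(kf.root.url)  # pydantic v2: the wrapped value lives on .root
    # (under pydantic v1 the same value was reached via kf.__root__.url)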
--- .github/workflows/update-api-stubs.yml | 2 +- comfy_api_nodes/apis/PixverseController.py | 2 +- comfy_api_nodes/apis/PixverseDto.py | 18 ++-- comfy_api_nodes/apis/__init__.py | 103 +++++++++++++++++++-- 4 files changed, 105 insertions(+), 20 deletions(-) diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml index 4eda03956ec..b27f2c58064 100644 --- a/.github/workflows/update-api-stubs.yml +++ b/.github/workflows/update-api-stubs.yml @@ -25,7 +25,7 @@ jobs: - name: Generate API models run: | - datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis + datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel - name: Check for changes id: git-check diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py index 2899706f6f7..583fd26da58 100644 --- a/comfy_api_nodes/apis/PixverseController.py +++ b/comfy_api_nodes/apis/PixverseController.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:06:44+00:00 +# timestamp: 2025-04-22T08:58:11+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py index 50d813c056d..32da4ab0582 100644 --- a/comfy_api_nodes/apis/PixverseDto.py +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:06:44+00:00 +# timestamp: 2025-04-22T08:58:11+00:00 from __future__ import annotations @@ -15,20 +15,20 @@ class V2OpenAPII2VResp(BaseModel): class V2OpenAPIT2VReq(BaseModel): aspect_ratio: str = Field( - ..., description='Aspect ratio (16:9, 4:3, 1:1, 3:4, 9:16)', example='16:9' + ..., description='Aspect ratio (16:9, 4:3, 1:1, 3:4, 9:16)', examples=['16:9'] ) duration: int = Field( ..., description='Video duration (5, 8 seconds, --model=v3.5 only allows 5,8; --quality=1080p does not support 8s)', - example=5, + examples=[5], ) model: str = Field( - ..., description='Model version (only supports v3.5)', example='v3.5' + ..., description='Model version (only supports v3.5)', examples=['v3.5'] ) motion_mode: Optional[str] = Field( 'normal', description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)', - example='normal', + examples=['normal'], ) negative_prompt: Optional[constr(max_length=2048)] = Field( None, description='Negative prompt\n' @@ -37,21 +37,21 @@ class V2OpenAPIT2VReq(BaseModel): quality: str = Field( ..., description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")', - example='540p', + examples=['540p'], ) seed: Optional[int] = Field(None, description='Random seed, range: 0 - 2147483647') style: Optional[str] = Field( None, description='Style (effective when model=v3.5, "anime", "3d_animation", "clay", "comic", "cyberpunk") Do not include style parameter unless needed', - example='anime', + examples=['anime'], ) template_id: Optional[int] = Field( None, description='Template ID (template_id must be activated before use)', - example=302325299692608, + examples=[302325299692608], ) water_mark: Optional[bool] = Field( False, description='Watermark (true: add watermark, false: no watermark)', - example=False, + examples=[False], ) diff --git a/comfy_api_nodes/apis/__init__.py 
b/comfy_api_nodes/apis/__init__.py index 30466816883..e34b68f8df4 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:06:44+00:00 +# timestamp: 2025-04-22T08:58:11+00:00 from __future__ import annotations @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Literal, Optional, Union from uuid import UUID -from pydantic import AnyUrl, BaseModel, Field, confloat, conint, constr +from pydantic import AnyUrl, BaseModel, Field, RootModel, confloat, conint, constr class BFLFluxProGenerateRequest(BaseModel): @@ -829,8 +829,8 @@ class LumaImageReference(BaseModel): url: AnyUrl = Field(..., description='The URL of the image') -class LumaKeyframe(BaseModel): - __root__: Union[LumaGenerationReference, LumaImageReference] = Field( +class LumaKeyframe(RootModel[Union[LumaGenerationReference, LumaImageReference]]): + root: Union[LumaGenerationReference, LumaImageReference] = Field( ..., description='A keyframe can be either a Generation reference, an Image, or a Video', discriminator='type', @@ -884,8 +884,10 @@ class LumaVideoModelOutputDuration1(str, Enum): field_9s = '9s' -class LumaVideoModelOutputDuration(BaseModel): - __root__: Union[LumaVideoModelOutputDuration1, str] +class LumaVideoModelOutputDuration( + RootModel[Union[LumaVideoModelOutputDuration1, str]] +): + root: Union[LumaVideoModelOutputDuration1, str] class LumaVideoModelOutputResolution1(str, Enum): @@ -895,8 +897,10 @@ class LumaVideoModelOutputResolution1(str, Enum): field_4k = '4k' -class LumaVideoModelOutputResolution(BaseModel): - __root__: Union[LumaVideoModelOutputResolution1, str] +class LumaVideoModelOutputResolution( + RootModel[Union[LumaVideoModelOutputResolution1, str]] +): + root: Union[LumaVideoModelOutputResolution1, str] class MachineStats(BaseModel): @@ -1054,6 +1058,87 @@ class NodeVersionUpdateRequest(BaseModel): ) +class Background(str, Enum): + transparent = 'transparent' + opaque = 'opaque' + + +class Moderation(str, Enum): + low = 'low' + auto = 'auto' + + +class OutputFormat(str, Enum): + png = 'png' + webp = 'webp' + jpeg = 'jpeg' + + +class Quality(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + +class ResponseFormat(str, Enum): + url = 'url' + b64_json = 'b64_json' + + +class Style(str, Enum): + vivid = 'vivid' + natural = 'natural' + + +class OpenAIImageGenerationRequest(BaseModel): + background: Optional[Background] = Field( + None, description='Background transparency', examples=['opaque'] + ) + model: Optional[str] = Field( + None, + description='The model to use for image generation', + examples=['gpt-image-1'], + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( + None, + description='The number of images to generate (1–10). 
Only 1 supported for dall-e-3.', + examples=[1], + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0–100)', examples=[100] + ) + output_format: Optional[OutputFormat] = Field( + None, description='Format of the output image', examples=['png'] + ) + prompt: str = Field( + ..., + description='A text description of the desired image', + examples=['Draw a rocket in front of a blackhole in deep space'], + ) + quality: Optional[Quality] = Field( + None, description='The quality of the generated image', examples=['high'] + ) + response_format: Optional[ResponseFormat] = Field( + None, description='Response format of image data', examples=['b64_json'] + ) + size: Optional[str] = Field( + None, + description='Size of the image (e.g., 1024x1024, 1536x1024, auto)', + examples=['1024x1536'], + ) + style: Optional[Style] = Field( + None, description='Style of the image (only for dall-e-3)', examples=['vivid'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) + + class PersonalAccessToken(BaseModel): createdAt: Optional[datetime] = Field( None, description='[Output Only]The date and time the token was created.' @@ -1299,7 +1384,7 @@ class PersonGeneration(str, Enum): class Parameters(BaseModel): - aspectRatio: Optional[str] = Field(None, example='16:9') + aspectRatio: Optional[str] = Field(None, examples=['16:9']) durationSeconds: Optional[int] = None enhancePrompt: Optional[bool] = None negativePrompt: Optional[str] = None From 273f8e5a8d6a2e5b4b8d990bbdbda5d70c387798 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 01:59:59 -0700 Subject: [PATCH 18/40] Use pydantic v2. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c2360f2bf06..05fd2403c51 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,4 +23,4 @@ kornia>=0.7.1 spandrel soundfile av -pydantic +pydantic~=2.0 From 1c1ef2d9e6240c5765df942ffa6af2e570cebcc9 Mon Sep 17 00:00:00 2001 From: Yoland Y <4950057+yoland68@users.noreply.github.com> Date: Tue, 22 Apr 2025 02:05:48 -0700 Subject: [PATCH 19/40] Add basic OpenAITextToImage node --- comfy_api_nodes/nodes_api.py | 143 +++++++++++++++++++++++++++++++++-- 1 file changed, 138 insertions(+), 5 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index b5b3ad5b822..600c1bc5f2f 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,7 +1,15 @@ +import io from inspect import cleandoc -from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO -from comfy_api_nodes.apis.client import ApiEndpoint, SynchronousOperation, HttpMethod -from comfy_api_nodes.apis import IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest + +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict +from comfy_api_nodes.apis import ( + IdeogramGenerateRequest, + IdeogramGenerateResponse, + ImageRequest, + OpenAIImageGenerationRequest, +) +from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation + class IdeogramTextToImage(ComfyNodeABC): """ @@ -91,11 +99,12 @@ def INPUT_TYPES(cls) -> InputTypeDict: def api_call(self, prompt, model, aspect_ratio=None, resolution=None, magic_prompt_option="AUTO", seed=0, style_type="NONE", negative_prompt="", num_images=1, color_palette="", auth_token=None): - import torch - from PIL import Image import io + import numpy as np 
import requests + import torch + from PIL import Image operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -156,14 +165,138 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): # return "" +class OpenAITextToImage(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 3 endpoint. + + Uses the proxy at /proxy/dalle-3/generate. Returned URLs are short‑lived, + so download or cache results if you need to keep them. + """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }), + # TODO: add NEW MODEL + "model": (IO.COMBO, { + "options": ["dall-e-3", "dall-e-2"], + "default": "dall-e-3", + "tooltip": "OpenAI model name", + }), + }, + "optional": { + "n": (IO.INT, { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "How many images to generate", + }), + "size": (IO.COMBO, { + "options": ["256x256", "512x512", "1024x1792", "1792x1024", "1024x1024", "1536x1024", "1024x1536", "auto"], + "default": "auto", + "tooltip": "Image size", + }), + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "Optional random seed", + }), + }, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "Example" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call(self, prompt, model, n=1, size="1024x1024", seed=0): + # Validate size based on model + if model == "dall-e-2": + if size == "auto": + size = "1024x1024" + valid_sizes = ["256x256", "512x512", "1024x1024"] + if size not in valid_sizes: + raise ValueError(f"Size {size} not valid for dall-e-2. Must be one of: {', '.join(valid_sizes)}") + elif model == "dall-e-3": + if size == "auto": + size = "1024x1024" + valid_sizes = ["1024x1024", "1792x1024", "1024x1792"] + if size not in valid_sizes: + raise ValueError(f"Size {size} not valid for dall-e-3. Must be one of: {', '.join(valid_sizes)}") + # TODO: add NEW MODEL + + + import io + + import numpy as np + import torch + from PIL import Image + + # build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/openai/images/generations", + method=HttpMethod.POST, + request_model=OpenAIImageGenerationRequest, + response_model=None + ), + request=OpenAIImageGenerationRequest( + model=model, + prompt=prompt, + n=n, + size=size, + seed=seed if seed != 0 else None + ), + ) + + response = operation.execute() + + # validate raw JSON response + if not isinstance(response, dict) or 'data' not in response: + raise Exception("Invalid response format from OpenAI endpoint") + + data = response['data'] + if not data or len(data) == 0: + raise Exception("No images returned from OpenAI endpoint") + + # Get base64 image data + b64_data = data[0].get('b64_json') + if not b64_data: + raise Exception("No image data in OpenAI response") + + # decode base64 to image + import base64 + img_data = base64.b64decode(b64_data) + img = Image.open(io.BytesIO(img_data)).convert("RGB") + + # Convert to tensor + arr = np.array(img).astype(np.float32) / 255.0 + tensor = torch.from_numpy(arr)[None, ...] 
# add batch dimension + + return (tensor,) # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { "IdeogramTextToImage": IdeogramTextToImage, + "OpenAIDalleTextToImage": OpenAIDalleTextToImage, } # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { "IdeogramTextToImage": "Ideogram Text to Image", + "OpenAIDalleTextToImage": "OpenAI DALL·E 3 Text to Image", } From e90100e1a5f2e11bbd0b4afbf9b298b04555cfc9 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 02:45:41 -0700 Subject: [PATCH 20/40] Add. --- comfy_api_nodes/apis/PixverseController.py | 4 ++-- comfy_api_nodes/apis/PixverseDto.py | 4 ++-- comfy_api_nodes/apis/__init__.py | 17 +++++++++++++---- comfy_api_nodes/apis/client.py | 3 ++- comfy_api_nodes/nodes_api.py | 17 ++++++++++------- 5 files changed, 29 insertions(+), 16 deletions(-) diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py index 583fd26da58..db49134fe5c 100644 --- a/comfy_api_nodes/apis/PixverseController.py +++ b/comfy_api_nodes/apis/PixverseController.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:58:11+00:00 +# filename: http://localhost:8080/openapi +# timestamp: 2025-04-22T09:45:21+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py index 32da4ab0582..36e7b090ee5 100644 --- a/comfy_api_nodes/apis/PixverseDto.py +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:58:11+00:00 +# filename: http://localhost:8080/openapi +# timestamp: 2025-04-22T09:45:21+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index e34b68f8df4..fbf557bbfac 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: https://stagingapi.comfy.org/openapi -# timestamp: 2025-04-22T08:58:11+00:00 +# filename: http://localhost:8080/openapi +# timestamp: 2025-04-22T09:45:21+00:00 from __future__ import annotations @@ -1139,6 +1139,15 @@ class OpenAIImageGenerationRequest(BaseModel): ) +class Datum1(BaseModel): + b64_json: Optional[str] = Field(None, description='Base64 encoded image data') + url: Optional[str] = Field(None, description='URL of the image') + + +class OpenAIImageGenerationResponse(BaseModel): + data: Optional[List[Datum1]] = None + + class PersonalAccessToken(BaseModel): createdAt: Optional[datetime] = Field( None, description='[Output Only]The date and time the token was created.' 
@@ -1186,7 +1195,7 @@ class RecraftImageGenerationRequest(BaseModel): ) -class Datum1(BaseModel): +class Datum2(BaseModel): image_id: Optional[str] = Field( None, description='Unique identifier for the generated image' ) @@ -1198,7 +1207,7 @@ class RecraftImageGenerationResponse(BaseModel): ..., description='Unix timestamp when the generation was created' ) credits: int = Field(..., description='Number of credits used for the generation') - data: List[Datum1] = Field(..., description='Array of generated image information') + data: List[Datum2] = Field(..., description='Array of generated image information') class StorageFile(BaseModel): diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 9603221b2e3..0512d9e3991 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -138,6 +138,7 @@ def request( request_headers = self.get_headers() if headers: request_headers.update(headers) + logging.debug(f"[DEBUG] Request Headers: {request_headers}") try: response = requests.request( method=method, @@ -220,7 +221,7 @@ def __init__( self, endpoint: ApiEndpoint[T, R], request: T, - api_base: str = "https://api.comfy.org", + api_base: str = "https://stagingapi.comfy.org", auth_token: Optional[str] = None, timeout: float = 30.0, verify_ssl: bool = True, diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 600c1bc5f2f..386b9c670df 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -7,6 +7,7 @@ IdeogramGenerateResponse, ImageRequest, OpenAIImageGenerationRequest, + OpenAIImageGenerationResponse ) from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation @@ -214,6 +215,9 @@ def INPUT_TYPES(cls) -> InputTypeDict: "tooltip": "Optional random seed", }), }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } } RETURN_TYPES = (IO.IMAGE,) @@ -222,7 +226,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call(self, prompt, model, n=1, size="1024x1024", seed=0): + def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None): # Validate size based on model if model == "dall-e-2": if size == "auto": @@ -251,7 +255,7 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0): path="/proxy/openai/images/generations", method=HttpMethod.POST, request_model=OpenAIImageGenerationRequest, - response_model=None + response_model=OpenAIImageGenerationResponse ), request=OpenAIImageGenerationRequest( model=model, @@ -260,20 +264,19 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0): size=size, seed=seed if seed != 0 else None ), + auth_token=auth_token ) response = operation.execute() # validate raw JSON response - if not isinstance(response, dict) or 'data' not in response: - raise Exception("Invalid response format from OpenAI endpoint") - data = response['data'] + data = response.data if not data or len(data) == 0: raise Exception("No images returned from OpenAI endpoint") # Get base64 image data - b64_data = data[0].get('b64_json') + b64_data = data[0].b64_json if not b64_data: raise Exception("No image data in OpenAI response") @@ -292,7 +295,7 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0): # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { "IdeogramTextToImage": IdeogramTextToImage, - "OpenAIDalleTextToImage": OpenAIDalleTextToImage, + "OpenAIDalleTextToImage": OpenAITextToImage, } # A dictionary that contains the friendly/humanly readable 
titles for the nodes From 1fa9ced75f5ab46cf962103ec664b6482de2200d Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 02:48:34 -0700 Subject: [PATCH 21/40] convert image to tensor. --- comfy_api_nodes/nodes_api.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 386b9c670df..07728a91ead 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -248,6 +248,7 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None import numpy as np import torch from PIL import Image + import requests # build the operation operation = SynchronousOperation( @@ -276,20 +277,23 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None raise Exception("No images returned from OpenAI endpoint") # Get base64 image data - b64_data = data[0].b64_json - if not b64_data: - raise Exception("No image data in OpenAI response") - - # decode base64 to image - import base64 - img_data = base64.b64decode(b64_data) - img = Image.open(io.BytesIO(img_data)).convert("RGB") - - # Convert to tensor - arr = np.array(img).astype(np.float32) / 255.0 - tensor = torch.from_numpy(arr)[None, ...] # add batch dimension + image_url = data[0].url + if not image_url: + raise Exception("No image URL was generated in the response") + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") - return (tensor,) + img = Image.open(io.BytesIO(img_response.content)) + img = img.convert("RGB") # Ensure RGB format + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + + # Convert to torch tensor and add batch dimension + img_tensor = torch.from_numpy(img_array)[None,] + + return (img_tensor,) # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique From daf1fab89b79d5d613de9e69e56bb1f7d5c14f92 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 02:57:21 -0700 Subject: [PATCH 22/40] Improve types. 
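The regenerated stubs below add a revised_prompt field to the response datum, since the OpenAI endpoint may rewrite the prompt it actually renders. A rough, purely illustrative sketch of consuming the pydantic v2 model once this lands — the payload here is made up for illustration:

    from comfy_api_nodes.apis import OpenAIImageGenerationResponse

    # Hypothetical response body, for illustration only.
    raw = {
        "data": [
            {
                "url": "https://example.com/output.png",
                "revised_prompt": "A rocket in front of a black hole, cinematic lighting",
            }
        ]
    }

    resp = OpenAIImageGenerationResponse.model_validate(raw)  # pydantic v2 parsing
    datum = resp.data[0]
    print(datum.revised_prompt)  # prompt as rewritten by the model
    print(datum.url)             # short-lived URL the node downloads from
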
--- comfy_api_nodes/apis/PixverseController.py | 2 +- comfy_api_nodes/apis/PixverseDto.py | 2 +- comfy_api_nodes/apis/__init__.py | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py index db49134fe5c..f3a0ca08941 100644 --- a/comfy_api_nodes/apis/PixverseController.py +++ b/comfy_api_nodes/apis/PixverseController.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:45:21+00:00 +# timestamp: 2025-04-22T09:57:11+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py index 36e7b090ee5..ce4779b0ab7 100644 --- a/comfy_api_nodes/apis/PixverseDto.py +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:45:21+00:00 +# timestamp: 2025-04-22T09:57:11+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index fbf557bbfac..15a12bb0f81 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:45:21+00:00 +# timestamp: 2025-04-22T09:57:11+00:00 from __future__ import annotations @@ -1141,6 +1141,7 @@ class OpenAIImageGenerationRequest(BaseModel): class Datum1(BaseModel): b64_json: Optional[str] = Field(None, description='Base64 encoded image data') + revised_prompt: Optional[str] = Field(None, description='Revised prompt') url: Optional[str] = Field(None, description='URL of the image') From 3ae7ecfe828a9e54d20c1102c0b14511b414dcfc Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 10:20:50 -0700 Subject: [PATCH 23/40] Ruff. --- comfy_api_nodes/nodes_api.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 07728a91ead..b8a36147094 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -100,7 +100,6 @@ def INPUT_TYPES(cls) -> InputTypeDict: def api_call(self, prompt, model, aspect_ratio=None, resolution=None, magic_prompt_option="AUTO", seed=0, style_type="NONE", negative_prompt="", num_images=1, color_palette="", auth_token=None): - import io import numpy as np import requests @@ -237,13 +236,12 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None elif model == "dall-e-3": if size == "auto": size = "1024x1024" - valid_sizes = ["1024x1024", "1792x1024", "1024x1792"] + valid_sizes = ["1024x1024", "1792x1024", "1024x1792"] if size not in valid_sizes: raise ValueError(f"Size {size} not valid for dall-e-3. Must be one of: {', '.join(valid_sizes)}") # TODO: add NEW MODEL - import io import numpy as np import torch @@ -271,7 +269,7 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None response = operation.execute() # validate raw JSON response - + data = response.data if not data or len(data) == 0: raise Exception("No images returned from OpenAI endpoint") From 429dfb75870633a000282fb3214fc3ffb42021b3 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 13:45:23 -0700 Subject: [PATCH 24/40] Push tests. 
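Alongside the timestamp refresh, the regenerated schema below picks up an OpenAIImageEditRequest model and a Quality enum for the edit endpoint. In this revision the edit model carries the image and mask payloads directly as bytes fields; a rough, purely illustrative sketch of constructing it (file paths and values are placeholders):

    from comfy_api_nodes.apis import OpenAIImageEditRequest

    # Placeholder inputs; in the node these would come from the IMAGE/MASK tensors.
    image_bytes = open("image.png", "rb").read()
    mask_bytes = open("mask.png", "rb").read()

    req = OpenAIImageEditRequest(
        model="gpt-image-1",
        prompt="Give the rocketship rainbow coloring",
        image=image_bytes,   # a single image, or a list of images for gpt-image-1
        mask=mask_bytes,     # fully transparent areas mark the editable region
        quality="low",       # validated against the Quality enum by pydantic
    )
    print(req.model_dump(exclude_none=True))
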
--- comfy_api_nodes/apis/PixverseController.py | 2 +- comfy_api_nodes/apis/PixverseDto.py | 2 +- comfy_api_nodes/apis/__init__.py | 40 +++++++++++++++++----- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py index f3a0ca08941..9095db2f85e 100644 --- a/comfy_api_nodes/apis/PixverseController.py +++ b/comfy_api_nodes/apis/PixverseController.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:57:11+00:00 +# timestamp: 2025-04-22T20:42:39+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py index ce4779b0ab7..35a0a7f1ba7 100644 --- a/comfy_api_nodes/apis/PixverseDto.py +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:57:11+00:00 +# timestamp: 2025-04-22T20:42:39+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 15a12bb0f81..c6951095fe2 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T09:57:11+00:00 +# timestamp: 2025-04-22T20:42:39+00:00 from __future__ import annotations @@ -1058,6 +1058,34 @@ class NodeVersionUpdateRequest(BaseModel): ) +class Quality(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + +class OpenAIImageEditRequest(BaseModel): + image: Union[bytes_aliased, List[bytes_aliased]] = Field( + ..., + description='Image(s) to edit. For DALL-E 2, only a single image is supported. For gpt-image-1, multiple images can be provided using image[] notation in form data.', + ) + mask: Optional[bytes_aliased] = Field( + None, + description='An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited', + ) + model: str = Field( + ..., description='The model to use for image editing', examples=['gpt-image-1'] + ) + prompt: str = Field( + ..., + description='A text description of the desired edit', + examples=['Give the rocketship rainbow coloring'], + ) + quality: Optional[Quality] = Field( + None, description='The quality of the edited image', examples=['low'] + ) + + class Background(str, Enum): transparent = 'transparent' opaque = 'opaque' @@ -1074,12 +1102,6 @@ class OutputFormat(str, Enum): jpeg = 'jpeg' -class Quality(str, Enum): - low = 'low' - medium = 'medium' - high = 'high' - - class ResponseFormat(str, Enum): url = 'url' b64_json = 'b64_json' @@ -1104,11 +1126,11 @@ class OpenAIImageGenerationRequest(BaseModel): ) n: Optional[int] = Field( None, - description='The number of images to generate (1–10). Only 1 supported for dall-e-3.', + description='The number of images to generate (1-10). Only 1 supported for dall-e-3.', examples=[1], ) output_compression: Optional[int] = Field( - None, description='Compression level for JPEG or WebP (0–100)', examples=[100] + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] ) output_format: Optional[OutputFormat] = Field( None, description='Format of the output image', examples=['png'] From 79c85a945211ce5eeb993ea047464ce705fa05e4 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 15:44:23 -0700 Subject: [PATCH 25/40] Handle multi-form data. 
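When a request carries file uploads, the client has to leave the Content-Type header to requests so that it can generate the multipart boundary itself, and the remaining fields have to travel as form data rather than JSON. A minimal sketch of the pattern, with a placeholder URL, token, and file path (the real flow goes through ApiClient.request below):

    import requests

    headers = {"Authorization": "Bearer <token>"}  # deliberately no Content-Type

    form_data = {"model": "gpt-image-1", "prompt": "Give the rocketship rainbow coloring"}
    files = {"image": ("image.png", open("image.png", "rb"), "image/png")}

    response = requests.post(
        "https://stagingapi.comfy.org/proxy/openai/images/edits",
        headers=headers,
        data=form_data,  # plain fields go in data=, not json=
        files=files,     # requests builds multipart/form-data and sets the boundary
        timeout=60,
    )
    response.raise_for_status()
    print(response.json())

In short: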
- Don't set content-type for multi-part/form - Use data field instead of JSON --- comfy_api_nodes/apis/client.py | 67 ++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 0512d9e3991..606bce5698a 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -138,18 +138,42 @@ def request( request_headers = self.get_headers() if headers: request_headers.update(headers) + + # Let requests handle the content type when files are present. + if files: + del request_headers["Content-Type"] + logging.debug(f"[DEBUG] Request Headers: {request_headers}") + logging.debug(f"[DEBUG] Files: {files}") + logging.debug(f"[DEBUG] Params: {params}") + logging.debug(f"[DEBUG] Json: {json}") + try: - response = requests.request( - method=method, - url=url, - params=params, - json=json, - files=files, - headers=request_headers, - timeout=self.timeout, - verify=self.verify_ssl, - ) + # If files are present, use data parameter instead of json + if files: + form_data = {} + if json: + form_data.update(json) + response = requests.request( + method=method, + url=url, + params=params, + data=form_data, # Use data instead of json + files=files, + headers=request_headers, + timeout=self.timeout, + verify=self.verify_ssl, + ) + else: + response = requests.request( + method=method, + url=url, + params=params, + json=json, + headers=request_headers, + timeout=self.timeout, + verify=self.verify_ssl, + ) # Raise exception for error status codes response.raise_for_status() @@ -166,7 +190,24 @@ def request( except requests.HTTPError as e: status_code = e.response.status_code if hasattr(e, "response") else None error_message = f"HTTP Error: {str(e)}" + + # Try to extract detailed error message from JSON response + try: + if hasattr(e, "response") and e.response.content: + error_json = e.response.json() + if "error" in error_json and "message" in error_json["error"]: + error_message = f"API Error: {error_json['error']['message']}" + if "type" in error_json["error"]: + error_message += f" (Type: {error_json['error']['type']})" + else: + error_message = f"API Error: {error_json}" + except Exception as json_error: + # If we can't parse the JSON, fall back to the original error message + logging.debug(f"[DEBUG] Failed to parse error response: {str(json_error)}") + logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})") + if hasattr(e, "response") and e.response.content: + logging.debug(f"[DEBUG] Response content: {e.response.content}") if status_code == 401: error_message = "Unauthorized: Please login first to use this node." 
if status_code == 402: @@ -221,9 +262,10 @@ def __init__( self, endpoint: ApiEndpoint[T, R], request: T, + files: Optional[Dict[str, Any]] = None, api_base: str = "https://stagingapi.comfy.org", auth_token: Optional[str] = None, - timeout: float = 30.0, + timeout: float = 60.0, verify_ssl: bool = True, ): self.endpoint = endpoint @@ -234,7 +276,7 @@ def __init__( self.auth_token = auth_token self.timeout = timeout self.verify_ssl = verify_ssl - + self.files = files def execute(self, client: Optional[ApiClient] = None) -> R: """Execute the API operation using the provided client or create one""" try: @@ -263,6 +305,7 @@ def execute(self, client: Optional[ApiClient] = None) -> R: path=self.endpoint.path, json=request_dict, params=self.endpoint.query_params, + files=self.files, ) # Debug log for response From 9bf1fc71d766c2abba525cd477e69c146f68bf78 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 19:31:09 -0700 Subject: [PATCH 26/40] Change to api.comfy.org --- .github/workflows/update-api-stubs.yml | 2 +- comfy_api_nodes/apis/client.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml index b27f2c58064..2ae99b67378 100644 --- a/.github/workflows/update-api-stubs.yml +++ b/.github/workflows/update-api-stubs.yml @@ -25,7 +25,7 @@ jobs: - name: Generate API models run: | - datamodel-codegen --use-subclass-enum --url https://stagingapi.comfy.org/openapi --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel + datamodel-codegen --use-subclass-enum --url https://api.comfy.org/openapi --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel - name: Check for changes id: git-check diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 606bce5698a..c10ff70178b 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -147,7 +147,7 @@ def request( logging.debug(f"[DEBUG] Files: {files}") logging.debug(f"[DEBUG] Params: {params}") logging.debug(f"[DEBUG] Json: {json}") - + try: # If files are present, use data parameter instead of json if files: @@ -212,6 +212,8 @@ def request( error_message = "Unauthorized: Please login first to use this node." if status_code == 402: error_message = "Payment Required: Please add credits to your account to use this node." + if status_code == 429: + error_message = "Rate Limit Exceeded: Please try again later." raise Exception(error_message) # Parse and return JSON response @@ -263,7 +265,7 @@ def __init__( endpoint: ApiEndpoint[T, R], request: T, files: Optional[Dict[str, Any]] = None, - api_base: str = "https://stagingapi.comfy.org", + api_base: str = "https://api.comfy.org", auth_token: Optional[str] = None, timeout: float = 60.0, verify_ssl: bool = True, From 7c41723ef355f3d4587edb1e5f04d2924e6ab85b Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 19:32:47 -0700 Subject: [PATCH 27/40] Handle error code 409. --- comfy_api_nodes/apis/client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index c10ff70178b..cd81d5a1d35 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -212,6 +212,8 @@ def request( error_message = "Unauthorized: Please login first to use this node." if status_code == 402: error_message = "Payment Required: Please add credits to your account to use this node." 
+ if status_code == 409: + error_message = "There is a problem with your account. Please contact support@comfy.org. " if status_code == 429: error_message = "Rate Limit Exceeded: Please try again later." raise Exception(error_message) From 8d414a99de3f68523c56efa6c11c7e0269daab73 Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 08:59:02 -0700 Subject: [PATCH 28/40] separate out nodes per openai model --- comfy_api_nodes/apis/PixverseController.py | 4 +- comfy_api_nodes/apis/PixverseDto.py | 4 +- comfy_api_nodes/apis/__init__.py | 86 +++-- comfy_api_nodes/nodes_api.py | 379 +++++++++++++++++---- 4 files changed, 379 insertions(+), 94 deletions(-) diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py index 9095db2f85e..29a3ab33bf9 100644 --- a/comfy_api_nodes/apis/PixverseController.py +++ b/comfy_api_nodes/apis/PixverseController.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T20:42:39+00:00 +# filename: https://api.comfy.org/openapi +# timestamp: 2025-04-23T15:56:33+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py index 35a0a7f1ba7..39951221440 100644 --- a/comfy_api_nodes/apis/PixverseDto.py +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T20:42:39+00:00 +# filename: https://api.comfy.org/openapi +# timestamp: 2025-04-23T15:56:33+00:00 from __future__ import annotations diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index c6951095fe2..ee1be7130ad 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: -# filename: http://localhost:8080/openapi -# timestamp: 2025-04-22T20:42:39+00:00 +# filename: https://api.comfy.org/openapi +# timestamp: 2025-04-23T15:56:33+00:00 from __future__ import annotations @@ -874,9 +874,8 @@ class GenerationType3(str, Enum): class LumaVideoModel(str, Enum): - ray_1_6 = 'ray-1-6' ray_2 = 'ray-2' - ray_flash_2 = 'ray-flash-2' + ray_2_flash = 'ray-2-flash' class LumaVideoModelOutputDuration1(str, Enum): @@ -1058,6 +1057,22 @@ class NodeVersionUpdateRequest(BaseModel): ) +class Background(str, Enum): + transparent = 'transparent' + opaque = 'opaque' + + +class Moderation(str, Enum): + low = 'low' + auto = 'auto' + + +class OutputFormat(str, Enum): + png = 'png' + webp = 'webp' + jpeg = 'jpeg' + + class Quality(str, Enum): low = 'low' medium = 'medium' @@ -1065,17 +1080,24 @@ class Quality(str, Enum): class OpenAIImageEditRequest(BaseModel): - image: Union[bytes_aliased, List[bytes_aliased]] = Field( - ..., - description='Image(s) to edit. For DALL-E 2, only a single image is supported. For gpt-image-1, multiple images can be provided using image[] notation in form data.', - ) - mask: Optional[bytes_aliased] = Field( - None, - description='An additional image whose fully transparent areas (e.g. 
where alpha is zero) indicate where image should be edited', + background: Optional[Background] = Field( + None, description='Background transparency', examples=['opaque'] ) model: str = Field( ..., description='The model to use for image editing', examples=['gpt-image-1'] ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( + None, description='The number of images to generate', examples=[1] + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + output_format: Optional[OutputFormat] = Field( + None, description='Format of the output image', examples=['png'] + ) prompt: str = Field( ..., description='A text description of the desired edit', @@ -1084,22 +1106,22 @@ class OpenAIImageEditRequest(BaseModel): quality: Optional[Quality] = Field( None, description='The quality of the edited image', examples=['low'] ) + size: Optional[str] = Field( + None, description='Size of the output image', examples=['1024x1024'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) -class Background(str, Enum): - transparent = 'transparent' - opaque = 'opaque' - - -class Moderation(str, Enum): +class Quality1(str, Enum): low = 'low' - auto = 'auto' - - -class OutputFormat(str, Enum): - png = 'png' - webp = 'webp' - jpeg = 'jpeg' + medium = 'medium' + high = 'high' + standard = 'standard' + hd = 'hd' class ResponseFormat(str, Enum): @@ -1117,9 +1139,7 @@ class OpenAIImageGenerationRequest(BaseModel): None, description='Background transparency', examples=['opaque'] ) model: Optional[str] = Field( - None, - description='The model to use for image generation', - examples=['gpt-image-1'], + None, description='The model to use for image generation', examples=['dall-e-3'] ) moderation: Optional[Moderation] = Field( None, description='Content moderation setting', examples=['auto'] @@ -1140,7 +1160,7 @@ class OpenAIImageGenerationRequest(BaseModel): description='A text description of the desired image', examples=['Draw a rocket in front of a blackhole in deep space'], ) - quality: Optional[Quality] = Field( + quality: Optional[Quality1] = Field( None, description='The quality of the generated image', examples=['high'] ) response_format: Optional[ResponseFormat] = Field( @@ -1500,18 +1520,18 @@ class KlingAuthenticationError(KlingErrorResponse): class LumaGenerationRequest(BaseModel): - aspect_ratio: Optional[LumaAspectRatio] = '16:9' + aspect_ratio: LumaAspectRatio callback_url: Optional[AnyUrl] = Field( None, description='The callback URL of the generation, a POST request with Generation object will be sent to the callback URL when the generation is dreaming, completed, or failed', ) - duration: Optional[LumaVideoModelOutputDuration] = None + duration: LumaVideoModelOutputDuration generation_type: Optional[GenerationType1] = 'video' keyframes: Optional[LumaKeyframes] = None loop: Optional[bool] = Field(None, description='Whether to loop the video') - model: Optional[LumaVideoModel] = 'ray-1-6' - prompt: Optional[str] = Field(None, description='The prompt of the generation') - resolution: Optional[LumaVideoModelOutputResolution] = None + model: LumaVideoModel + prompt: str = Field(..., description='The prompt of the generation') + resolution: LumaVideoModelOutputResolution class CharacterRef(BaseModel): diff --git a/comfy_api_nodes/nodes_api.py 
b/comfy_api_nodes/nodes_api.py index b8a36147094..3a060733631 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,16 +1,60 @@ import io from inspect import cleandoc +from comfy.utils import common_upscale from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict from comfy_api_nodes.apis import ( IdeogramGenerateRequest, IdeogramGenerateResponse, ImageRequest, OpenAIImageGenerationRequest, + OpenAIImageEditRequest, OpenAIImageGenerationResponse ) from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation +import numpy as np +from PIL import Image +import requests +import torch +import math + +def downscale_input(image): + samples = image.movedim(-1,1) + #downscaling input images to roughly the same size as the outputs + total = int(1024 * 1024) + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + if scale_by >= 1: + return (image,) + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = common_upscale(samples, width, height, "lanczos", "disabled") + s = s.movedim(1,-1) + return s + +def validate_and_cast_response (response): + # validate raw JSON response + data = response.data + if not data or len(data) == 0: + raise Exception("No images returned from API endpoint") + + # Get base64 image data + image_url = data[0].url + if not image_url: + raise Exception("No image URL was generated in the response") + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") + + img = Image.open(io.BytesIO(img_response.content)) + img = img.convert("RGB") # Ensure RGB format + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + + # Convert to torch tensor and add batch dimension + return torch.from_numpy(img_array)[None,] class IdeogramTextToImage(ComfyNodeABC): """ @@ -165,11 +209,11 @@ def api_call(self, prompt, model, aspect_ratio=None, resolution=None, #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): # return "" -class OpenAITextToImage(ComfyNodeABC): +class OpenAIDalle2(ComfyNodeABC): """ - Generates images synchronously via OpenAI's DALL·E 3 endpoint. + Generates images synchronously via OpenAI's DALL·E 2 endpoint. - Uses the proxy at /proxy/dalle-3/generate. Returned URLs are short‑lived, + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, so download or cache results if you need to keep them. 
""" def __init__(self): @@ -184,14 +228,21 @@ def INPUT_TYPES(cls) -> InputTypeDict: "default": "", "tooltip": "Text prompt for DALL·E", }), - # TODO: add NEW MODEL - "model": (IO.COMBO, { - "options": ["dall-e-3", "dall-e-2"], - "default": "dall-e-3", - "tooltip": "OpenAI model name", - }), }, "optional": { + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "not implemented yet in backend", + }), + "size": (IO.COMBO, { + "options": ["256x256", "512x512", "1024x1024"], + "default": "1024x1024", + "tooltip": "Image size", + }), "n": (IO.INT, { "default": 1, "min": 1, @@ -200,18 +251,126 @@ def INPUT_TYPES(cls) -> InputTypeDict: "display": "number", "tooltip": "How many images to generate", }), - "size": (IO.COMBO, { - "options": ["256x256", "512x512", "1024x1792", "1792x1024", "1024x1024", "1536x1024", "1024x1536", "auto"], - "default": "auto", - "tooltip": "Image size", + "image": (IO.IMAGE, { + "default": None, + "tooltip": "Optional reference image for image editing.", + }), + "mask": (IO.MASK, { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "Example" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call(self, prompt, seed=0, image=None, mask=None, n=1, size="1024x1024", auth_token=None): + model = "dall-e-2" + path = "/proxy/openai/images/generations" + request_class = OpenAIImageGenerationRequest + img_binary = None + + if image is not None and mask is not None: + path = "/proxy/openai/images/edits" + request_class = OpenAIImageEditRequest + + input_tensor = image.squeeze().cpu() + height, width, channels = input_tensor.shape + rgba_tensor = torch.ones(height, width, 4, device="cpu") + rgba_tensor[:, :, :channels] = input_tensor + + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + rgba_tensor[:,:,3] = (1-mask.squeeze().cpu()) + + rgba_tensor = downscale_input(rgba_tensor.unsqueeze(0)).squeeze() + + image_np = (rgba_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format='PNG') + img_byte_arr.seek(0) + img_binary = img_byte_arr#.getvalue() + img_binary.name = "image.png" + elif image is not None or mask is not None: + raise Exception("Dall-E 2 image editing requires an image AND a mask") + + # Build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse + ), + request=request_class( + model=model, + prompt=prompt, + n=n, + size=size, + seed=seed, + ), + files={ + "image": img_binary, + } if img_binary else None, + auth_token=auth_token + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + +class OpenAIDalle3(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 3 endpoint. + + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, + so download or cache results if you need to keep them. 
+ """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }), + }, + "optional": { "seed": (IO.INT, { "default": 0, "min": 0, "max": 2**31-1, "step": 1, "display": "number", - "tooltip": "Optional random seed", + "tooltip": "not implemented yet in backend", + }), + "quality" : (IO.COMBO, { + "options": ["standard","hd"], + "default": "standard", + "tooltip": "Image quality", + }), + "style": (IO.COMBO, { + "options": ["natural","vivid"], + "default": "natural", + "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", + }), + "size": (IO.COMBO, { + "options": ["1024x1024", "1024x1792", "1792x1024"], + "default": "1024x1024", + "tooltip": "Image size", }), }, "hidden": { @@ -225,28 +384,8 @@ def INPUT_TYPES(cls) -> InputTypeDict: DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None): - # Validate size based on model - if model == "dall-e-2": - if size == "auto": - size = "1024x1024" - valid_sizes = ["256x256", "512x512", "1024x1024"] - if size not in valid_sizes: - raise ValueError(f"Size {size} not valid for dall-e-2. Must be one of: {', '.join(valid_sizes)}") - elif model == "dall-e-3": - if size == "auto": - size = "1024x1024" - valid_sizes = ["1024x1024", "1792x1024", "1024x1792"] - if size not in valid_sizes: - raise ValueError(f"Size {size} not valid for dall-e-3. Must be one of: {', '.join(valid_sizes)}") - # TODO: add NEW MODEL - - - - import numpy as np - import torch - from PIL import Image - import requests + def api_call(self, prompt, seed=0, style="natural", quality="standard", size="1024x1024", auth_token=None): + model = "dall-e-3" # build the operation operation = SynchronousOperation( @@ -259,49 +398,175 @@ def api_call(self, prompt, model, n=1, size="1024x1024", seed=0, auth_token=None request=OpenAIImageGenerationRequest( model=model, prompt=prompt, - n=n, + quality=quality, size=size, - seed=seed if seed != 0 else None + style=style, + seed=seed, ), auth_token=auth_token ) response = operation.execute() - # validate raw JSON response + img_tensor = validate_and_cast_response(response) + return (img_tensor,) - data = response.data - if not data or len(data) == 0: - raise Exception("No images returned from OpenAI endpoint") +class OpenAIXXX(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 2 endpoint. - # Get base64 image data - image_url = data[0].url - if not image_url: - raise Exception("No image URL was generated in the response") - img_response = requests.get(image_url) - if img_response.status_code != 200: - raise Exception("Failed to download the image") + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, + so download or cache results if you need to keep them. 
+ """ + def __init__(self): + pass - img = Image.open(io.BytesIO(img_response.content)) - img = img.convert("RGB") # Ensure RGB format + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for XXX", + }), + }, + "optional": { + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "not implemented yet in backend", + }), + "quality": (IO.COMBO, { + "options": ["low","medium","high"], + "default": "low", + "tooltip": "Image quality, affects cost and generation time.", + }), + "background": (IO.COMBO, { + "options": ["opaque","transparent"], + "default": "opaque", + "tooltip": "Return image with or without background", + }), + "size": (IO.COMBO, { + "options": ["auto", "1024x1024", "1024x1536", "1536x1024"], + "default": "auto", + "tooltip": "Image size", + }), + "n": (IO.INT, { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "How many images to generate", + }), + "image": (IO.IMAGE, { + "default": None, + "tooltip": "Optional reference image for image editing.", + }), + "mask": (IO.MASK, { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } + } - # Convert to numpy array, normalize to float32 between 0 and 1 - img_array = np.array(img).astype(np.float32) / 255.0 + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "Example" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True - # Convert to torch tensor and add batch dimension - img_tensor = torch.from_numpy(img_array)[None,] + def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None): + model = "xxx" + path = "/proxy/openai/images/generations" + request_class = OpenAIImageGenerationRequest + img_binary = None + mask_binary = None + + + if image is not None: + path = "/proxy/openai/images/edits" + request_class = OpenAIImageEditRequest + + scaled_image = downscale_input(image).squeeze() + + image_np = (scaled_image.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format='PNG') + img_byte_arr.seek(0) + img_binary = img_byte_arr#.getvalue() + img_binary.name = "image.png" + + if mask is not None: + if image is None: + raise Exception("Cannot use a mask without an input image") + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + batch, height, width = mask.shape + rgba_mask = torch.zeros(height, width, 4, device="cpu") + rgba_mask[:,:,3] = (1-mask.squeeze().cpu()) + mask_np = (rgba_mask.numpy() * 255).astype(np.uint8) + mask_img = Image.fromarray(mask_np) + mask_img_byte_arr = io.BytesIO() + mask_img.save(mask_img_byte_arr, format='PNG') + mask_img_byte_arr.seek(0) + mask_binary = mask_img_byte_arr#.getvalue() + mask_binary.name = "mask.png" + + files = {} + if img_binary: + files["image"] = img_binary + if mask_binary: + files["mask"] = mask_binary + + # Build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse + ), + request=request_class( + model=model, + prompt=prompt, + quality=quality, + background=background, + n=n, + seed=seed, + size=size, + ), + files=files if 
files else None, + auth_token=auth_token + ) + + response = operation.execute() + img_tensor = validate_and_cast_response(response) return (img_tensor,) + # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { "IdeogramTextToImage": IdeogramTextToImage, - "OpenAIDalleTextToImage": OpenAITextToImage, + "OpenAIDalle2": OpenAIDalle2, + "OpenAIDalle3": OpenAIDalle3, + "OpenAIXXX": OpenAIXXX, } # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { "IdeogramTextToImage": "Ideogram Text to Image", - "OpenAIDalleTextToImage": "OpenAI DALL·E 3 Text to Image", + "OpenAIDalle2": "OpenAI DALL·E 2", + "OpenAIDalle3": "OpenAI DALL·E 3", + "OpenAIXXX": "XXX", } From ffb63a63dda46475b777288ef6c3daca812fa5dd Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 23 Apr 2025 09:17:35 -0700 Subject: [PATCH 29/40] Update error message. --- comfy_api_nodes/apis/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index cd81d5a1d35..9bc3d76d51d 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -226,7 +226,7 @@ def request( def check_auth_token(self, auth_token): """Verify that an auth token is present.""" if auth_token is None: - raise Exception("Please login first to use this node.") + raise Exception("Unauthorized: Please login first to use this node.") return auth_token From 68838ea6f2a75170896fa7fa34f1375db2f823f4 Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 09:25:05 -0700 Subject: [PATCH 30/40] fix wrong output type --- comfy_api_nodes/nodes_api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 3a060733631..3d4d78d22f8 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -22,10 +22,10 @@ def downscale_input(image): samples = image.movedim(-1,1) #downscaling input images to roughly the same size as the outputs - total = int(1024 * 1024) + total = int(1536 * 1024) scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) if scale_by >= 1: - return (image,) + return image width = round(samples.shape[3] * scale_by) height = round(samples.shape[2] * scale_by) From e0bd2fda633d7b1bd953683d207f62a9873ce534 Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 09:28:25 -0700 Subject: [PATCH 31/40] re-categorize nodes, remove ideogram (for now) --- comfy_api_nodes/nodes_api.py | 159 +---------------------------------- 1 file changed, 3 insertions(+), 156 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 3d4d78d22f8..c9e89e83f6f 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -56,159 +56,6 @@ def validate_and_cast_response (response): # Convert to torch tensor and add batch dimension return torch.from_numpy(img_array)[None,] -class IdeogramTextToImage(ComfyNodeABC): - """ - Generates images synchronously based on a given prompt and optional parameters. - - Images links are available for a limited period of time; if you would like to keep the image, you must download it. - """ - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - """ - Return a dictionary which contains config for all input fields. 
- Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT". - Input types "INT", "STRING" or "FLOAT" are special values for fields on the node. - The type can be a list for selection. - - Returns: `dict`: - - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required` - - Value input_fields (`dict`): Contains input fields config: - * Key field_name (`string`): Name of a entry-point method's argument - * Value field_config (`tuple`): - + First value is a string indicate the type of field or a list for selection. - + Secound value is a config for type "INT", "STRING" or "FLOAT". - """ - return { - "required": { - "prompt": (IO.STRING, { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }), - "model": (IO.COMBO, { "options": ["V_2", "V_2_TURBO", "V_1", "V_1_TURBO"], "default": "V_2", "tooltip": "Model to use for image generation"}), - }, - "optional": { - "aspect_ratio": (IO.COMBO, { "options": ["ASPECT_1_1", "ASPECT_4_3", "ASPECT_3_4", "ASPECT_16_9", "ASPECT_9_16", "ASPECT_2_1", "ASPECT_1_2", "ASPECT_3_2", "ASPECT_2_3", "ASPECT_4_5", "ASPECT_5_4"], "default": "ASPECT_1_1", "tooltip": "The aspect ratio for image generation. Cannot be used with resolution" - }), - "resolution": (IO.COMBO, { "options": ["1024x1024", "1024x1792", "1792x1024"], - "default": "1024x1024", - "tooltip": "The resolution for image generation (V2 only). Cannot be used with aspect_ratio" - }), - "magic_prompt_option": (IO.COMBO, { "options": ["AUTO", "ON", "OFF"], - "default": "AUTO", - "tooltip": "Determine if MagicPrompt should be used in generation" - }), - "seed": (IO.INT, { - "default": 0, - "min": 0, - "max": 2147483647, - "step": 1, - "display": "number" - }), - "style_type": (IO.COMBO, { "options": ["NONE", "ANIME", "CINEMATIC", "CREATIVE", "DIGITAL_ART", "PHOTOGRAPHIC"], - "default": "NONE", - "tooltip": "Style type for generation (V2+ only)" - }), - "negative_prompt": (IO.STRING, { - "multiline": True, - "default": "", - "tooltip": "Description of what to exclude from the image (V1/V2 only)" - }), - "num_images": (IO.INT, { - "default": 1, - "min": 1, - "max": 8, - "step": 1, - "display": "number" - }), - "color_palette": (IO.STRING, { - "multiline": False, - "default": "", - "tooltip": "Color palette preset name or hex colors with weights (V2/V2_TURBO only)" - }), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG" - } - } - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "Example" - - def api_call(self, prompt, model, aspect_ratio=None, resolution=None, - magic_prompt_option="AUTO", seed=0, style_type="NONE", - negative_prompt="", num_images=1, color_palette="", auth_token=None): - - import numpy as np - import requests - import torch - from PIL import Image - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/ideogram/generate", - method=HttpMethod.POST, - request_model=IdeogramGenerateRequest, - response_model=IdeogramGenerateResponse - ), - request=IdeogramGenerateRequest( - image_request=ImageRequest( - prompt=prompt, - model=model, - num_images=num_images, - seed=seed, - aspect_ratio=aspect_ratio if aspect_ratio != "ASPECT_1_1" else None, - resolution=resolution if resolution != "1024x1024" else None, - magic_prompt_option=magic_prompt_option if magic_prompt_option != "AUTO" else None, - style_type=style_type if style_type != 
"NONE" else None, - negative_prompt=negative_prompt if negative_prompt else None, - color_palette=None - ) - ), - auth_token=auth_token - ) - - response = operation.execute() - - if not response.data or len(response.data) == 0: - raise Exception("No images were generated in the response") - image_url = response.data[0].url - - if not image_url: - raise Exception("No image URL was generated in the response") - img_response = requests.get(image_url) - if img_response.status_code != 200: - raise Exception("Failed to download the image") - - img = Image.open(io.BytesIO(img_response.content)) - img = img.convert("RGB") # Ensure RGB format - - # Convert to numpy array, normalize to float32 between 0 and 1 - img_array = np.array(img).astype(np.float32) / 255.0 - - # Convert to torch tensor and add batch dimension - img_tensor = torch.from_numpy(img_array)[None,] - - return (img_tensor,) - - """ - The node will always be re executed if any of the inputs change but - this method can be used to force the node to execute again even when the inputs don't change. - You can make this node return a number or a string. This value will be compared to the one returned the last time the node was - executed, if it is different the node will be executed again. - This method is used in the core repo for the LoadImage node where they return the image hash as a string, if the image hash - changes between executions the LoadImage node is executed again. - """ - #@classmethod - #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): - # return "" - class OpenAIDalle2(ComfyNodeABC): """ Generates images synchronously via OpenAI's DALL·E 2 endpoint. @@ -267,7 +114,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "Example" + CATEGORY = "api node" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True @@ -380,7 +227,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "Example" + CATEGORY = "api node" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True @@ -479,7 +326,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "Example" + CATEGORY = "api node" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True From 553c8a305ced2b9c008352d9d643048813309e2f Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 09:32:37 -0700 Subject: [PATCH 32/40] oops, fix mappings --- comfy_api_nodes/nodes_api.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index c9e89e83f6f..875014d1073 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -404,7 +404,6 @@ def api_call(self, prompt, seed=0, quality="low", background="opaque", image=Non # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { - "IdeogramTextToImage": IdeogramTextToImage, "OpenAIDalle2": OpenAIDalle2, "OpenAIDalle3": OpenAIDalle3, "OpenAIXXX": OpenAIXXX, @@ -412,7 +411,6 @@ def api_call(self, prompt, seed=0, quality="low", background="opaque", image=Non # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { - "IdeogramTextToImage": "Ideogram Text to Image", "OpenAIDalle2": "OpenAI DALL·E 2", "OpenAIDalle3": "OpenAI DALL·E 3", "OpenAIXXX": "XXX", From 4a7aca7af974b73bfe87001bdeeece865220d0bf Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 
23 Apr 2025 09:33:49 -0700 Subject: [PATCH 33/40] fix ruff --- comfy_api_nodes/nodes_api.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 875014d1073..b2e234f8cc1 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -4,9 +4,6 @@ from comfy.utils import common_upscale from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict from comfy_api_nodes.apis import ( - IdeogramGenerateRequest, - IdeogramGenerateResponse, - ImageRequest, OpenAIImageGenerationRequest, OpenAIImageEditRequest, OpenAIImageGenerationResponse From 8bd626423c8ac2e64c5003630b3328cd042b62a4 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 23 Apr 2025 09:35:19 -0700 Subject: [PATCH 34/40] Update frontend to 1.17.9 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 05fd2403c51..ac201c4d99c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.16.9 +comfyui-frontend-package==1.17.9 comfyui-workflow-templates==0.1.3 torch torchsde From 422d633c86cb45a6002e7a8a70b06c97c323761a Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 10:38:19 -0700 Subject: [PATCH 35/40] embargo lift rename nodes --- comfy_api_nodes/nodes_api.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index b2e234f8cc1..8a366157591 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -255,9 +255,9 @@ def api_call(self, prompt, seed=0, style="natural", quality="standard", size="10 img_tensor = validate_and_cast_response(response) return (img_tensor,) -class OpenAIXXX(ComfyNodeABC): +class OpenAIGPTImage1(ComfyNodeABC): """ - Generates images synchronously via OpenAI's DALL·E 2 endpoint. + Generates images synchronously via OpenAI's GPT Image 1 endpoint. Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, so download or cache results if you need to keep them. 
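
Note (illustrative, not part of this patch): the docstring above points out that URLs returned through the /proxy/openai/images/generations proxy are short-lived and should be downloaded or cached if the result is to be kept. A minimal sketch of that caching step is shown below; the {"data": [{"url": ...}]} response shape mirrors the OpenAI-style responses used in this file, while the helper name and cache directory are made-up examples.

    # Hedged sketch only: download a short-lived result URL right away and
    # cache it locally. The response shape mirrors the OpenAI-style responses
    # used in this patch; the cache directory is a hypothetical example.
    import os
    import requests

    def cache_image_from_response(response_json, cache_dir="openai_image_cache"):
        """Download the first returned image URL and save it under cache_dir."""
        os.makedirs(cache_dir, exist_ok=True)
        url = response_json["data"][0]["url"]
        resp = requests.get(url, timeout=60)
        resp.raise_for_status()
        # Derive a simple file name from the URL path; fall back to a fixed name.
        name = os.path.basename(url.split("?")[0]) or "result.png"
        path = os.path.join(cache_dir, name)
        with open(path, "wb") as f:
            f.write(resp.content)
        return path
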
@@ -272,7 +272,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: "prompt": (IO.STRING, { "multiline": True, "default": "", - "tooltip": "Text prompt for XXX", + "tooltip": "Text prompt for GPT Image 1", }), }, "optional": { @@ -328,7 +328,7 @@ def INPUT_TYPES(cls) -> InputTypeDict: API_NODE = True def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None): - model = "xxx" + model = "gpt-image-1" path = "/proxy/openai/images/generations" request_class = OpenAIImageGenerationRequest img_binary = None @@ -403,12 +403,12 @@ def api_call(self, prompt, seed=0, quality="low", background="opaque", image=Non NODE_CLASS_MAPPINGS = { "OpenAIDalle2": OpenAIDalle2, "OpenAIDalle3": OpenAIDalle3, - "OpenAIXXX": OpenAIXXX, + "OpenAIGPTImage1": OpenAIGPTImage1, } # A dictionary that contains the friendly/humanly readable titles for the nodes NODE_DISPLAY_NAME_MAPPINGS = { "OpenAIDalle2": "OpenAI DALL·E 2", "OpenAIDalle3": "OpenAI DALL·E 3", - "OpenAIXXX": "XXX", + "OpenAIGPTImage1": "OpenAI GPT Image 1", } From f65b05e86592f4c9393fa41f4ded1e6cf7d687f6 Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 11:01:01 -0700 Subject: [PATCH 36/40] remove unused autogenerated model code --- comfy_api_nodes/apis/__init__.py | 584 ------------------------------- 1 file changed, 584 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index ee1be7130ad..4702e3a5839 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -11,35 +11,6 @@ from pydantic import AnyUrl, BaseModel, Field, RootModel, confloat, conint, constr - -class BFLFluxProGenerateRequest(BaseModel): - guidance_scale: Optional[confloat(ge=1.0, le=20.0)] = Field( - None, description='The guidance scale for generation.' - ) - height: conint(ge=64, le=2048) = Field( - ..., description='The height of the image to generate.' - ) - negative_prompt: Optional[str] = Field( - None, description='The negative prompt for image generation.' - ) - num_images: Optional[conint(ge=1, le=4)] = Field( - None, description='The number of images to generate.' - ) - num_inference_steps: Optional[conint(ge=1, le=100)] = Field( - None, description='The number of inference steps.' - ) - prompt: str = Field(..., description='The text prompt for image generation.') - seed: Optional[int] = Field(None, description='The seed value for reproducibility.') - width: conint(ge=64, le=2048) = Field( - ..., description='The width of the image to generate.' - ) - - -class BFLFluxProGenerateResponse(BaseModel): - id: str = Field(..., description='The unique identifier for the generation task.') - polling_url: str = Field(..., description='URL to poll for the generation result.') - - class ComfyNode(BaseModel): category: Optional[str] = Field( None, @@ -158,12 +129,6 @@ class ImageRequest(BaseModel): ) -class IdeogramGenerateRequest(BaseModel): - image_request: ImageRequest = Field( - ..., description='The image generation request parameters.' - ) - - class Datum(BaseModel): is_image_safe: Optional[bool] = Field( None, description='Indicates whether the image is considered safe.' @@ -184,15 +149,6 @@ class Datum(BaseModel): url: Optional[str] = Field(None, description='URL to the generated image.') -class IdeogramGenerateResponse(BaseModel): - created: Optional[datetime] = Field( - None, description='Timestamp when the generation was created.' 
- ) - data: Optional[List[Datum]] = Field( - None, description='Array of generated image information.' - ) - - class Code(Enum): int_1100 = 1100 int_1101 = 1101 @@ -256,35 +212,6 @@ class Mode(str, Enum): pro = 'pro' -class ModelName(str, Enum): - kling_v1 = 'kling-v1' - kling_v1_6 = 'kling-v1-6' - - -class KlingImage2VideoRequest(BaseModel): - aspect_ratio: Optional[AspectRatio] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - camera_control: Optional[CameraControl] = None - cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Flexibility in video generation' - ) - duration: Optional[Duration] = 5 - external_task_id: Optional[str] = Field(None, description='Customized Task ID') - image_url: Optional[AnyUrl] = Field( - None, description='URL of the image to be used for video generation' - ) - mode: Optional[Mode] = Field('std', description='Video generation mode') - model_name: Optional[ModelName] = Field('kling-v1', description='Model Name') - negative_prompt: Optional[constr(max_length=2500)] = Field( - None, description='Negative text prompt' - ) - prompt: Optional[constr(max_length=2500)] = Field( - None, description='Positive text prompt' - ) - - class TaskInfo(BaseModel): external_task_id: Optional[str] = None @@ -315,13 +242,6 @@ class Data(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingImage2VideoResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - class AspectRatio1(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' @@ -338,38 +258,6 @@ class ImageReference(str, Enum): face = 'face' -class ModelName1(str, Enum): - kling_v1 = 'kling-v1' - kling_v1_5 = 'kling-v1-5' - - -class KlingImageGenerationsRequest(BaseModel): - aspect_ratio: Optional[AspectRatio1] = Field( - '16:9', description='Aspect ratio of the generated images' - ) - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - human_fidelity: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.45, description='Subject reference similarity' - ) - image: Optional[str] = Field( - None, description='Reference Image - Base64 encoded string or image URL' - ) - image_fidelity: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Reference intensity for user-uploaded images' - ) - image_reference: Optional[ImageReference] = Field( - None, description='Image reference type' - ) - model_name: Optional[ModelName1] = Field('kling-v1', description='Model Name') - n: Optional[conint(ge=1, le=9)] = Field(1, description='Number of generated images') - negative_prompt: Optional[constr(max_length=200)] = Field( - None, description='Negative text prompt' - ) - prompt: constr(max_length=500) = Field(..., description='Positive text prompt') - - class Image(BaseModel): index: Optional[int] = Field(None, description='Image Number (0-9)') url: Optional[AnyUrl] = Field(None, description='URL for generated image') @@ -388,13 +276,6 @@ class Data1(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingImageGenerationsResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data1] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = 
Field(None, description='Request ID') - - class AspectRatio2(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' @@ -411,33 +292,6 @@ class ModelName2(str, Enum): kling_v1_6 = 'kling-v1-6' -class KlingLipSyncRequest(BaseModel): - aspect_ratio: Optional[AspectRatio2] = '16:9' - audio_url: Optional[AnyUrl] = Field( - None, description='URL of the audio to be used for lip-syncing' - ) - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - camera_control: Optional[CameraControl1] = None - cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Flexibility in video generation' - ) - duration: Optional[Duration] = 5 - external_task_id: Optional[str] = Field(None, description='Customized Task ID') - mode: Optional[Mode] = Field('std', description='Video generation mode') - model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') - negative_prompt: Optional[constr(max_length=2500)] = Field( - None, description='Negative text prompt' - ) - prompt: Optional[constr(max_length=2500)] = Field( - None, description='Positive text prompt' - ) - video_url: Optional[AnyUrl] = Field( - None, description='URL of the video to be lip-synced' - ) - - class TaskResult2(BaseModel): videos: Optional[List[Video]] = None @@ -451,13 +305,6 @@ class Data2(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingLipSyncResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data2] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - class Code2(Enum): int_1200 = 1200 int_1201 = 1201 @@ -465,13 +312,6 @@ class Code2(Enum): int_1203 = 1203 -class KlingRequestError(KlingErrorResponse): - code: Optional[Code2] = Field( - None, - description='- 1200: Invalid request parameters\n- 1201: Invalid parameters\n- 1202: Invalid request method\n- 1203: Requested resource does not exist\n', - ) - - class ResourcePackType(str, Enum): decreasing_total = 'decreasing_total' constant_period = 'constant_period' @@ -515,29 +355,12 @@ class Data3(BaseModel): ) -class KlingResourcePackageResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code; 0 indicates success') - data: Optional[Data3] = None - message: Optional[str] = Field(None, description='Error information') - request_id: Optional[str] = Field( - None, - description='Request ID, generated by the system, used to track requests and troubleshoot problems', - ) - - class Code3(Enum): int_5000 = 5000 int_5001 = 5001 int_5002 = 5002 -class KlingServerError(KlingErrorResponse): - code: Optional[Code3] = Field( - None, - description='- 5000: Internal server error\n- 5001: Service temporarily unavailable\n- 5002: Server internal timeout\n', - ) - - class Code4(Enum): int_1300 = 1300 int_1301 = 1301 @@ -546,39 +369,11 @@ class Code4(Enum): int_1304 = 1304 -class KlingStrategyError(KlingErrorResponse): - code: Optional[Code4] = Field( - None, - description='- 1300: Trigger platform strategy\n- 1301: Trigger content security policy\n- 1302: API request too frequent\n- 1303: Concurrency/QPS exceeds limit\n- 1304: Trigger IP whitelist policy\n', - ) - - class CameraControl2(BaseModel): config: Optional[Config] = None type: Optional[Type] = Field(None, description='Predefined camera movements type') -class KlingText2VideoRequest(BaseModel): - aspect_ratio: Optional[AspectRatio2] = '16:9' - callback_url: 
Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - camera_control: Optional[CameraControl2] = None - cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Flexibility in video generation' - ) - duration: Optional[Duration] = 5 - external_task_id: Optional[str] = Field(None, description='Customized Task ID') - mode: Optional[Mode] = Field('std', description='Video generation mode') - model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') - negative_prompt: Optional[constr(max_length=2500)] = Field( - None, description='Negative text prompt' - ) - prompt: Optional[constr(max_length=2500)] = Field( - None, description='Positive text prompt' - ) - - class TaskResult3(BaseModel): videos: Optional[List[Video]] = None @@ -592,42 +387,11 @@ class Data4(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingText2VideoResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data4] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - class CameraControl3(BaseModel): config: Optional[Config] = None type: Optional[Type] = Field(None, description='Predefined camera movements type') -class KlingVideoEffectsRequest(BaseModel): - aspect_ratio: Optional[AspectRatio2] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - camera_control: Optional[CameraControl3] = None - cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Flexibility in video generation' - ) - duration: Optional[Duration] = 5 - external_task_id: Optional[str] = Field(None, description='Customized Task ID') - mode: Optional[Mode] = Field('std', description='Video generation mode') - model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') - negative_prompt: Optional[constr(max_length=2500)] = Field( - None, description='Negative text prompt' - ) - prompt: Optional[constr(max_length=2500)] = Field( - None, description='Positive text prompt' - ) - video_url: Optional[AnyUrl] = Field( - None, description='URL of the video to be used for effects generation' - ) - - class TaskResult4(BaseModel): videos: Optional[List[Video]] = None @@ -641,42 +405,11 @@ class Data5(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingVideoEffectsResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data5] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - class CameraControl4(BaseModel): config: Optional[Config] = None type: Optional[Type] = Field(None, description='Predefined camera movements type') -class KlingVideoExtendRequest(BaseModel): - aspect_ratio: Optional[AspectRatio2] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - camera_control: Optional[CameraControl4] = None - cfg_scale: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.5, description='Flexibility in video generation' - ) - duration: Optional[Duration] = 5 - external_task_id: Optional[str] = Field(None, description='Customized Task ID') - mode: Optional[Mode] = Field('std', description='Video generation mode') - model_name: Optional[ModelName2] = Field('kling-v1', description='Model Name') - 
negative_prompt: Optional[constr(max_length=2500)] = Field( - None, description='Negative text prompt' - ) - prompt: Optional[constr(max_length=2500)] = Field( - None, description='Positive text prompt' - ) - video_url: Optional[AnyUrl] = Field( - None, description='URL of the video to be extended' - ) - - class TaskResult5(BaseModel): videos: Optional[List[Video]] = None @@ -690,34 +423,11 @@ class Data6(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingVideoExtendResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data6] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - class ModelName6(str, Enum): kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' -class KlingVirtualTryOnRequest(BaseModel): - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - cloth_image: Optional[str] = Field( - None, - description='Reference clothing image - Base64 encoded string or image URL', - ) - human_image: str = Field( - ..., description='Reference human image - Base64 encoded string or image URL' - ) - model_name: Optional[ModelName6] = Field( - 'kolors-virtual-try-on-v1', description='Model Name' - ) - - class Image1(BaseModel): index: Optional[int] = Field(None, description='Image Number') url: Optional[AnyUrl] = Field(None, description='URL for generated image') @@ -736,172 +446,26 @@ class Data7(BaseModel): updated_at: Optional[int] = Field(None, description='Task update time') -class KlingVirtualTryOnResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - data: Optional[Data7] = None - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - - -class LumaAspectRatio(str, Enum): - field_1_1 = '1:1' - field_16_9 = '16:9' - field_9_16 = '9:16' - field_4_3 = '4:3' - field_3_4 = '3:4' - field_21_9 = '21:9' - field_9_21 = '9:21' - - -class LumaAssets(BaseModel): - image: Optional[AnyUrl] = Field(None, description='The URL of the image') - progress_video: Optional[AnyUrl] = Field( - None, description='The URL of the progress video' - ) - video: Optional[AnyUrl] = Field(None, description='The URL of the video') - - class GenerationType(str, Enum): add_audio = 'add_audio' -class LumaAudioGenerationRequest(BaseModel): - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the audio' - ) - generation_type: Optional[GenerationType] = 'add_audio' - negative_prompt: Optional[str] = Field( - None, description='The negative prompt of the audio' - ) - prompt: Optional[str] = Field(None, description='The prompt of the audio') - - -class LumaError(BaseModel): - detail: Optional[str] = Field(None, description='The error message') - - class Type5(str, Enum): generation = 'generation' -class LumaGenerationReference(BaseModel): - id: UUID = Field(..., description='The ID of the generation') - type: Literal['generation'] - - class GenerationType1(str, Enum): video = 'video' -class LumaGenerationType(str, Enum): - video = 'video' - image = 'image' - - class GenerationType2(str, Enum): image = 'image' -class LumaImageIdentity(BaseModel): - images: Optional[List[AnyUrl]] = Field( - None, description='The URLs of the image identity' - ) - - -class LumaImageModel(str, Enum): - photon_1 = 'photon-1' - 
photon_flash_1 = 'photon-flash-1' - - -class LumaImageRef(BaseModel): - url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') - weight: Optional[float] = Field( - None, description='The weight of the image reference' - ) - - class Type6(str, Enum): image = 'image' -class LumaImageReference(BaseModel): - type: Literal['image'] - url: AnyUrl = Field(..., description='The URL of the image') - - -class LumaKeyframe(RootModel[Union[LumaGenerationReference, LumaImageReference]]): - root: Union[LumaGenerationReference, LumaImageReference] = Field( - ..., - description='A keyframe can be either a Generation reference, an Image, or a Video', - discriminator='type', - examples=[ - { - 'summary': 'Generation reference', - 'value': { - 'id': '123e4567-e89b-12d3-a456-426614174000', - 'type': 'generation', - }, - }, - { - 'summary': 'Image reference', - 'value': {'type': 'image', 'url': 'https://example.com/image.jpg'}, - }, - ], - ) - - -class LumaKeyframes(BaseModel): - frame0: Optional[LumaKeyframe] = None - frame1: Optional[LumaKeyframe] = None - - -class LumaModifyImageRef(BaseModel): - url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') - weight: Optional[float] = Field( - None, description='The weight of the modify image reference' - ) - - -class LumaState(str, Enum): - queued = 'queued' - dreaming = 'dreaming' - completed = 'completed' - failed = 'failed' - - -class GenerationType3(str, Enum): - upscale_video = 'upscale_video' - - -class LumaVideoModel(str, Enum): - ray_2 = 'ray-2' - ray_2_flash = 'ray-2-flash' - - -class LumaVideoModelOutputDuration1(str, Enum): - field_5s = '5s' - field_9s = '9s' - - -class LumaVideoModelOutputDuration( - RootModel[Union[LumaVideoModelOutputDuration1, str]] -): - root: Union[LumaVideoModelOutputDuration1, str] - - -class LumaVideoModelOutputResolution1(str, Enum): - field_540p = '540p' - field_720p = '720p' - field_1080p = '1080p' - field_4k = '4k' - - -class LumaVideoModelOutputResolution( - RootModel[Union[LumaVideoModelOutputResolution1, str]] -): - root: Union[LumaVideoModelOutputResolution1, str] - - class MachineStats(BaseModel): cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') disk_capacity: Optional[str] = Field( @@ -932,16 +496,6 @@ class MachineStats(BaseModel): ) -class MinimaxBaseResponse(BaseModel): - status_code: int = Field( - ..., - description='Status code. 0 indicates success, other values indicate errors.', - ) - status_msg: str = Field( - ..., description='Specific error details or success message.' 
- ) - - class File(BaseModel): bytes: Optional[int] = Field(None, description='File size in bytes') created_at: Optional[int] = Field( @@ -955,11 +509,6 @@ class File(BaseModel): purpose: Optional[str] = Field(None, description='The purpose of using the file') -class MinimaxFileRetrieveResponse(BaseModel): - base_resp: MinimaxBaseResponse - file: File - - class Status1(str, Enum): Queueing = 'Queueing' Preparing = 'Preparing' @@ -968,28 +517,6 @@ class Status1(str, Enum): Fail = 'Fail' -class MinimaxTaskResultResponse(BaseModel): - base_resp: MinimaxBaseResponse - file_id: Optional[str] = Field( - None, - description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', - ) - status: Status1 = Field( - ..., - description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", - ) - task_id: str = Field(..., description='The task ID being queried.') - - -class Model(str, Enum): - T2V_01_Director = 'T2V-01-Director' - I2V_01_Director = 'I2V-01-Director' - S2V_01 = 'S2V-01' - I2V_01 = 'I2V-01' - I2V_01_live = 'I2V-01-live' - T2V_01 = 'T2V-01' - - class SubjectReferenceItem(BaseModel): image: Optional[str] = Field( None, description='URL or base64 encoding of the subject reference image.' @@ -1000,40 +527,6 @@ class SubjectReferenceItem(BaseModel): ) -class MinimaxVideoGenerationRequest(BaseModel): - callback_url: Optional[str] = Field( - None, - description='Optional. URL to receive real-time status updates about the video generation task.', - ) - first_frame_image: Optional[str] = Field( - None, - description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', - ) - model: Model = Field( - ..., - description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', - ) - prompt: Optional[constr(max_length=2000)] = Field( - None, - description='Description of the video. Should be less than 2000 characters. Supports camera movement instructions in [brackets].', - ) - prompt_optimizer: Optional[bool] = Field( - True, - description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', - ) - subject_reference: Optional[List[SubjectReferenceItem]] = Field( - None, - description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', - ) - - -class MinimaxVideoGenerationResponse(BaseModel): - base_resp: MinimaxBaseResponse - task_id: str = Field( - ..., description='The task ID for the asynchronous video generation task.' 
- ) - - class NodeStatus(str, Enum): NodeStatusActive = 'NodeStatusActive' NodeStatusDeleted = 'NodeStatusDeleted' @@ -1505,61 +998,6 @@ class ActionJobResult(BaseModel): workflow_name: Optional[str] = Field(None, description='Name of the workflow') -class KlingAccountError(KlingErrorResponse): - code: Optional[Code] = Field( - None, - description='- 1100: Account exception\n- 1101: Account in arrears (postpaid scenario)\n- 1102: Resource pack depleted or expired (prepaid scenario)\n- 1103: Unauthorized access to requested resource\n', - ) - - -class KlingAuthenticationError(KlingErrorResponse): - code: Optional[Code1] = Field( - None, - description='- 1000: Authentication failed\n- 1001: Authorization is empty\n- 1002: Authorization is invalid\n- 1003: Authorization is not yet valid\n- 1004: Authorization has expired\n', - ) - - -class LumaGenerationRequest(BaseModel): - aspect_ratio: LumaAspectRatio - callback_url: Optional[AnyUrl] = Field( - None, - description='The callback URL of the generation, a POST request with Generation object will be sent to the callback URL when the generation is dreaming, completed, or failed', - ) - duration: LumaVideoModelOutputDuration - generation_type: Optional[GenerationType1] = 'video' - keyframes: Optional[LumaKeyframes] = None - loop: Optional[bool] = Field(None, description='Whether to loop the video') - model: LumaVideoModel - prompt: str = Field(..., description='The prompt of the generation') - resolution: LumaVideoModelOutputResolution - - -class CharacterRef(BaseModel): - identity0: Optional[LumaImageIdentity] = None - - -class LumaImageGenerationRequest(BaseModel): - aspect_ratio: Optional[LumaAspectRatio] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the generation' - ) - character_ref: Optional[CharacterRef] = None - generation_type: Optional[GenerationType2] = 'image' - image_ref: Optional[List[LumaImageRef]] = None - model: Optional[LumaImageModel] = 'photon-1' - modify_image_ref: Optional[LumaModifyImageRef] = None - prompt: Optional[str] = Field(None, description='The prompt of the generation') - style_ref: Optional[List[LumaImageRef]] = None - - -class LumaUpscaleVideoGenerationRequest(BaseModel): - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the upscale' - ) - generation_type: Optional[GenerationType3] = 'upscale_video' - resolution: Optional[LumaVideoModelOutputResolution] = None - - class NodeVersion(BaseModel): changelog: Optional[str] = Field( None, description='Summary of changes made in this version' @@ -1706,28 +1144,6 @@ class StripePaymentIntent(BaseModel): transfer_group: Optional[Any] = None -class LumaGeneration(BaseModel): - assets: Optional[LumaAssets] = None - created_at: Optional[datetime] = Field( - None, description='The date and time when the generation was created' - ) - failure_reason: Optional[str] = Field( - None, description='The reason for the state of the generation' - ) - generation_type: Optional[LumaGenerationType] = None - id: Optional[UUID] = Field(None, description='The ID of the generation') - model: Optional[str] = Field(None, description='The model used for the generation') - request: Optional[ - Union[ - LumaGenerationRequest, - LumaImageGenerationRequest, - LumaUpscaleVideoGenerationRequest, - LumaAudioGenerationRequest, - ] - ] = Field(None, description='The request of the generation') - state: Optional[LumaState] = None - - class Publisher(BaseModel): createdAt: Optional[datetime] = Field( None, description='The 
date and time the publisher was created.' From cf209aa456c80745861fb00a05590775e2d06749 Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 11:35:43 -0700 Subject: [PATCH 37/40] fix API type error and add b64 support for 4o --- comfy_api_nodes/apis/__init__.py | 4 ++-- comfy_api_nodes/nodes_api.py | 20 ++++++++++++++------ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 4702e3a5839..85ced474399 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -573,7 +573,7 @@ class Quality(str, Enum): class OpenAIImageEditRequest(BaseModel): - background: Optional[Background] = Field( + background: Optional[str] = Field( None, description='Background transparency', examples=['opaque'] ) model: str = Field( @@ -596,7 +596,7 @@ class OpenAIImageEditRequest(BaseModel): description='A text description of the desired edit', examples=['Give the rocketship rainbow coloring'], ) - quality: Optional[Quality] = Field( + quality: Optional[str] = Field( None, description='The quality of the edited image', examples=['low'] ) size: Optional[str] = Field( diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 8a366157591..d0f977406f4 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -15,6 +15,7 @@ import requests import torch import math +import base64 def downscale_input(image): samples = image.movedim(-1,1) @@ -38,13 +39,20 @@ def validate_and_cast_response (response): # Get base64 image data image_url = data[0].url - if not image_url: - raise Exception("No image URL was generated in the response") - img_response = requests.get(image_url) - if img_response.status_code != 200: - raise Exception("Failed to download the image") + b64_data = data[0].b64_json + if not image_url and not b64_data: + raise Exception("No image was generated in the response") + + if b64_data: + img_data = base64.b64decode(b64_data) + img = Image.open(io.BytesIO(img_data)) + + elif image_url: + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") + img = Image.open(io.BytesIO(img_response.content)) - img = Image.open(io.BytesIO(img_response.content)) img = img.convert("RGB") # Ensure RGB format # Convert to numpy array, normalize to float32 between 0 and 1 From 6fb5bb14317a1321bab25e9677a455fa0a094f9e Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 11:39:12 -0700 Subject: [PATCH 38/40] fix ruff --- comfy_api_nodes/apis/__init__.py | 4 ++-- comfy_api_nodes/nodes_api.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 85ced474399..b6ebce7949b 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -6,10 +6,10 @@ from datetime import datetime from enum import Enum -from typing import Any, Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Optional, Union from uuid import UUID -from pydantic import AnyUrl, BaseModel, Field, RootModel, confloat, conint, constr +from pydantic import AnyUrl, BaseModel, Field, confloat, conint class ComfyNode(BaseModel): category: Optional[str] = Field( diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index d0f977406f4..1910e8f746c 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -46,7 +46,7 @@ def validate_and_cast_response 
(response): if b64_data: img_data = base64.b64decode(b64_data) img = Image.open(io.BytesIO(img_data)) - + elif image_url: img_response = requests.get(image_url) if img_response.status_code != 200: From 18fba2a3c0ec9fa4920737ebd16e8e123171a67f Mon Sep 17 00:00:00 2001 From: thot-experiment Date: Wed, 23 Apr 2025 11:50:26 -0700 Subject: [PATCH 39/40] oops forgot mask scaling code --- comfy_api_nodes/nodes_api.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 1910e8f746c..92f4a0c87b9 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -365,7 +365,10 @@ def api_call(self, prompt, seed=0, quality="low", background="opaque", image=Non batch, height, width = mask.shape rgba_mask = torch.zeros(height, width, 4, device="cpu") rgba_mask[:,:,3] = (1-mask.squeeze().cpu()) - mask_np = (rgba_mask.numpy() * 255).astype(np.uint8) + + scaled_mask = downscale_input(rgba_mask.unsqueeze(0)).squeeze() + + mask_np = (scaled_mask.numpy() * 255).astype(np.uint8) mask_img = Image.fromarray(mask_np) mask_img_byte_arr = io.BytesIO() mask_img.save(mask_img_byte_arr, format='PNG') From 9b0633bddfde5f383a6c4510e57744e70ba53c43 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 23 Apr 2025 12:22:06 -0700 Subject: [PATCH 40/40] Remove unused types. --- comfy_api_nodes/apis/__init__.py | 785 +------------------------------ 1 file changed, 1 insertion(+), 784 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index b6ebce7949b..e7ea9b33283 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -6,52 +6,10 @@ from datetime import datetime from enum import Enum -from typing import Any, Dict, List, Optional, Union -from uuid import UUID +from typing import Any, Dict, List, Optional from pydantic import AnyUrl, BaseModel, Field, confloat, conint -class ComfyNode(BaseModel): - category: Optional[str] = Field( - None, - description='UI category where the node is listed, used for grouping nodes.', - ) - comfy_node_name: Optional[str] = Field( - None, description='Unique identifier for the node' - ) - deprecated: Optional[bool] = Field( - None, - description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', - ) - description: Optional[str] = Field( - None, description="Brief description of the node's functionality or purpose." - ) - experimental: Optional[bool] = Field( - None, - description='Indicates if the node is experimental, subject to changes or removal.', - ) - function: Optional[str] = Field( - None, description='Name of the entry-point function to execute the node.' - ) - input_types: Optional[str] = Field(None, description='Defines input parameters') - output_is_list: Optional[List[bool]] = Field( - None, description='Boolean values indicating if each output is a list.' - ) - return_names: Optional[str] = Field( - None, description='Names of the outputs for clarity in workflows.' - ) - return_types: Optional[str] = Field( - None, description='Specifies the types of outputs produced by the node.' 
- ) - - -class ComfyNodeCloudBuildInfo(BaseModel): - build_id: Optional[str] = None - location: Optional[str] = None - project_id: Optional[str] = None - project_number: Optional[str] = None - - class Customer(BaseModel): createdAt: Optional[datetime] = Field( None, description='The date and time the user was created' @@ -78,22 +36,6 @@ class ErrorResponse(BaseModel): error: str message: str - -class GitCommitSummary(BaseModel): - author: Optional[str] = Field(None, description='The author of the commit') - branch_name: Optional[str] = Field( - None, description='The branch where the commit was made' - ) - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_name: Optional[str] = Field(None, description='The name of the commit') - status_summary: Optional[Dict[str, str]] = Field( - None, description='A map of operating system to status pairs' - ) - timestamp: Optional[datetime] = Field( - None, description='The timestamp when the commit was made' - ) - - class ImageRequest(BaseModel): aspect_ratio: Optional[str] = Field( None, @@ -164,16 +106,6 @@ class Code1(Enum): int_1004 = 1004 -class KlingErrorResponse(BaseModel): - code: int = Field( - ..., description='Error code value as defined in the API documentation' - ) - message: str = Field(..., description='Human-readable error message') - request_id: str = Field( - ..., description='Request ID for tracking and troubleshooting' - ) - - class AspectRatio(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' @@ -346,210 +278,6 @@ class ResourcePackSubscribeInfo(BaseModel): status: Optional[Status] = Field(None, description='Resource Package Status') total_quantity: Optional[float] = Field(None, description='Total quantity') - -class Data3(BaseModel): - code: Optional[int] = Field(None, description='Error code; 0 indicates success') - msg: Optional[str] = Field(None, description='Error information') - resource_pack_subscribe_infos: Optional[List[ResourcePackSubscribeInfo]] = Field( - None, description='Resource package list' - ) - - -class Code3(Enum): - int_5000 = 5000 - int_5001 = 5001 - int_5002 = 5002 - - -class Code4(Enum): - int_1300 = 1300 - int_1301 = 1301 - int_1302 = 1302 - int_1303 = 1303 - int_1304 = 1304 - - -class CameraControl2(BaseModel): - config: Optional[Config] = None - type: Optional[Type] = Field(None, description='Predefined camera movements type') - - -class TaskResult3(BaseModel): - videos: Optional[List[Video]] = None - - -class Data4(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') - task_id: Optional[str] = Field(None, description='Task ID') - task_info: Optional[TaskInfo] = None - task_result: Optional[TaskResult3] = None - task_status: Optional[TaskStatus] = None - updated_at: Optional[int] = Field(None, description='Task update time') - - -class CameraControl3(BaseModel): - config: Optional[Config] = None - type: Optional[Type] = Field(None, description='Predefined camera movements type') - - -class TaskResult4(BaseModel): - videos: Optional[List[Video]] = None - - -class Data5(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') - task_id: Optional[str] = Field(None, description='Task ID') - task_info: Optional[TaskInfo] = None - task_result: Optional[TaskResult4] = None - task_status: Optional[TaskStatus] = None - updated_at: Optional[int] = Field(None, description='Task update time') - - -class CameraControl4(BaseModel): - config: Optional[Config] = None - type: Optional[Type] = Field(None, 
description='Predefined camera movements type') - - -class TaskResult5(BaseModel): - videos: Optional[List[Video]] = None - - -class Data6(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') - task_id: Optional[str] = Field(None, description='Task ID') - task_info: Optional[TaskInfo] = None - task_result: Optional[TaskResult5] = None - task_status: Optional[TaskStatus] = None - updated_at: Optional[int] = Field(None, description='Task update time') - - -class ModelName6(str, Enum): - kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' - kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' - - -class Image1(BaseModel): - index: Optional[int] = Field(None, description='Image Number') - url: Optional[AnyUrl] = Field(None, description='URL for generated image') - - -class TaskResult6(BaseModel): - images: Optional[List[Image1]] = None - - -class Data7(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') - task_id: Optional[str] = Field(None, description='Task ID') - task_result: Optional[TaskResult6] = None - task_status: Optional[TaskStatus] = None - task_status_msg: Optional[str] = Field(None, description='Task status information') - updated_at: Optional[int] = Field(None, description='Task update time') - - -class GenerationType(str, Enum): - add_audio = 'add_audio' - - -class Type5(str, Enum): - generation = 'generation' - - -class GenerationType1(str, Enum): - video = 'video' - - -class GenerationType2(str, Enum): - image = 'image' - - -class Type6(str, Enum): - image = 'image' - - -class MachineStats(BaseModel): - cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') - disk_capacity: Optional[str] = Field( - None, description='Total disk capacity on the machine.' - ) - gpu_type: Optional[str] = Field( - None, description='The GPU type. eg. NVIDIA Tesla K80' - ) - initial_cpu: Optional[str] = Field( - None, description='Initial CPU available before the job starts.' - ) - initial_disk: Optional[str] = Field( - None, description='Initial disk available before the job starts.' - ) - initial_ram: Optional[str] = Field( - None, description='Initial RAM available before the job starts.' - ) - machine_name: Optional[str] = Field(None, description='Name of the machine.') - memory_capacity: Optional[str] = Field( - None, description='Total memory on the machine.' - ) - os_version: Optional[str] = Field( - None, description='The operating system version. eg. Ubuntu Linux 20.04' - ) - pip_freeze: Optional[str] = Field(None, description='The pip freeze output') - vram_time_series: Optional[Dict[str, Any]] = Field( - None, description='Time series of VRAM usage.' 
- ) - - -class File(BaseModel): - bytes: Optional[int] = Field(None, description='File size in bytes') - created_at: Optional[int] = Field( - None, description='Unix timestamp when the file was created, in seconds' - ) - download_url: Optional[str] = Field( - None, description='The URL to download the video' - ) - file_id: Optional[int] = Field(None, description='Unique identifier for the file') - filename: Optional[str] = Field(None, description='The name of the file') - purpose: Optional[str] = Field(None, description='The purpose of using the file') - - -class Status1(str, Enum): - Queueing = 'Queueing' - Preparing = 'Preparing' - Processing = 'Processing' - Success = 'Success' - Fail = 'Fail' - - -class SubjectReferenceItem(BaseModel): - image: Optional[str] = Field( - None, description='URL or base64 encoding of the subject reference image.' - ) - mask: Optional[str] = Field( - None, - description='URL or base64 encoding of the mask for the subject reference image.', - ) - - -class NodeStatus(str, Enum): - NodeStatusActive = 'NodeStatusActive' - NodeStatusDeleted = 'NodeStatusDeleted' - NodeStatusBanned = 'NodeStatusBanned' - - -class NodeVersionStatus(str, Enum): - NodeVersionStatusActive = 'NodeVersionStatusActive' - NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' - NodeVersionStatusBanned = 'NodeVersionStatusBanned' - NodeVersionStatusPending = 'NodeVersionStatusPending' - NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' - - -class NodeVersionUpdateRequest(BaseModel): - changelog: Optional[str] = Field( - None, description='The changelog describing the version changes.' - ) - deprecated: Optional[bool] = Field( - None, description='Whether the version is deprecated.' - ) - - class Background(str, Enum): transparent = 'transparent' opaque = 'opaque' @@ -682,216 +410,6 @@ class Datum1(BaseModel): class OpenAIImageGenerationResponse(BaseModel): data: Optional[List[Datum1]] = None - - -class PersonalAccessToken(BaseModel): - createdAt: Optional[datetime] = Field( - None, description='[Output Only]The date and time the token was created.' - ) - description: Optional[str] = Field( - None, - description="Optional. A more detailed description of the token's intended use.", - ) - id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') - name: Optional[str] = Field( - None, - description='Required. The name of the token. Can be a simple description.', - ) - token: Optional[str] = Field( - None, - description='[Output Only]. The personal access token. 
Only returned during creation.', - ) - - -class PublisherStatus(str, Enum): - PublisherStatusActive = 'PublisherStatusActive' - PublisherStatusBanned = 'PublisherStatusBanned' - - -class PublisherUser(BaseModel): - email: Optional[str] = Field(None, description='The email address for this user.') - id: Optional[str] = Field(None, description='The unique id for this user.') - name: Optional[str] = Field(None, description='The name for this user.') - - -class RecraftImageGenerationRequest(BaseModel): - model: str = Field( - ..., description='The model to use for generation (e.g., "recraftv3")' - ) - n: conint(ge=1, le=4) = Field(..., description='The number of images to generate') - prompt: str = Field( - ..., description='The text prompt describing the image to generate' - ) - size: str = Field( - ..., description='The size of the generated image (e.g., "1024x1024")' - ) - style: Optional[str] = Field( - None, - description='The style to apply to the generated image (e.g., "digital_illustration")', - ) - - -class Datum2(BaseModel): - image_id: Optional[str] = Field( - None, description='Unique identifier for the generated image' - ) - url: Optional[str] = Field(None, description='URL to access the generated image') - - -class RecraftImageGenerationResponse(BaseModel): - created: int = Field( - ..., description='Unix timestamp when the generation was created' - ) - credits: int = Field(..., description='Number of credits used for the generation') - data: List[Datum2] = Field(..., description='Array of generated image information') - - -class StorageFile(BaseModel): - file_path: Optional[str] = Field(None, description='Path to the file in storage') - id: Optional[UUID] = Field( - None, description='Unique identifier for the storage file' - ) - public_url: Optional[str] = Field(None, description='Public URL') - - -class StripeAddress(BaseModel): - city: Optional[str] = None - country: Optional[str] = None - line1: Optional[str] = None - line2: Optional[str] = None - postal_code: Optional[str] = None - state: Optional[str] = None - - -class StripeAmountDetails(BaseModel): - tip: Optional[Dict[str, Any]] = None - - -class StripeBillingDetails(BaseModel): - address: Optional[StripeAddress] = None - email: Optional[str] = None - name: Optional[str] = None - phone: Optional[str] = None - tax_id: Optional[Any] = None - - -class Checks(BaseModel): - address_line1_check: Optional[Any] = None - address_postal_code_check: Optional[Any] = None - cvc_check: Optional[str] = None - - -class ExtendedAuthorization(BaseModel): - status: Optional[str] = None - - -class IncrementalAuthorization(BaseModel): - status: Optional[str] = None - - -class Multicapture(BaseModel): - status: Optional[str] = None - - -class NetworkToken(BaseModel): - used: Optional[bool] = None - - -class Overcapture(BaseModel): - maximum_amount_capturable: Optional[int] = None - status: Optional[str] = None - - -class StripeCardDetails(BaseModel): - amount_authorized: Optional[int] = None - authorization_code: Optional[Any] = None - brand: Optional[str] = None - checks: Optional[Checks] = None - country: Optional[str] = None - exp_month: Optional[int] = None - exp_year: Optional[int] = None - extended_authorization: Optional[ExtendedAuthorization] = None - fingerprint: Optional[str] = None - funding: Optional[str] = None - incremental_authorization: Optional[IncrementalAuthorization] = None - installments: Optional[Any] = None - last4: Optional[str] = None - mandate: Optional[Any] = None - multicapture: Optional[Multicapture] = None - 
network: Optional[str] = None - network_token: Optional[NetworkToken] = None - network_transaction_id: Optional[str] = None - overcapture: Optional[Overcapture] = None - regulated_status: Optional[str] = None - three_d_secure: Optional[Any] = None - wallet: Optional[Any] = None - - -class Object(str, Enum): - charge = 'charge' - - -class Object1(str, Enum): - event = 'event' - - -class Type7(str, Enum): - payment_intent_succeeded = 'payment_intent.succeeded' - - -class StripeOutcome(BaseModel): - advice_code: Optional[Any] = None - network_advice_code: Optional[Any] = None - network_decline_code: Optional[Any] = None - network_status: Optional[str] = None - reason: Optional[Any] = None - risk_level: Optional[str] = None - risk_score: Optional[int] = None - seller_message: Optional[str] = None - type: Optional[str] = None - - -class Object2(str, Enum): - payment_intent = 'payment_intent' - - -class StripePaymentMethodDetails(BaseModel): - card: Optional[StripeCardDetails] = None - type: Optional[str] = None - - -class Card(BaseModel): - installments: Optional[Any] = None - mandate_options: Optional[Any] = None - network: Optional[Any] = None - request_three_d_secure: Optional[str] = None - - -class StripePaymentMethodOptions(BaseModel): - card: Optional[Card] = None - - -class StripeRefundList(BaseModel): - data: Optional[List[Dict[str, Any]]] = None - has_more: Optional[bool] = None - object: Optional[str] = None - total_count: Optional[int] = None - url: Optional[str] = None - - -class StripeRequestInfo(BaseModel): - id: Optional[str] = None - idempotency_key: Optional[str] = None - - -class StripeShipping(BaseModel): - address: Optional[StripeAddress] = None - carrier: Optional[str] = None - name: Optional[str] = None - phone: Optional[str] = None - tracking_number: Optional[str] = None - - class User(BaseModel): email: Optional[str] = Field(None, description='The email address for this user.') id: Optional[str] = Field(None, description='The unique id for this user.') @@ -902,304 +420,3 @@ class User(BaseModel): None, description='Indicates if the user is approved.' 
) name: Optional[str] = Field(None, description='The name for this user.') - - -class Image2(BaseModel): - bytesBase64Encoded: str - gcsUri: Optional[str] = None - mimeType: Optional[str] = None - - -class Image3(BaseModel): - bytesBase64Encoded: Optional[str] = None - gcsUri: str - mimeType: Optional[str] = None - - -class Instance(BaseModel): - image: Optional[Union[Image2, Image3]] = Field( - None, description='Optional image to guide video generation' - ) - prompt: str = Field(..., description='Text description of the video') - - -class PersonGeneration(str, Enum): - ALLOW = 'ALLOW' - BLOCK = 'BLOCK' - - -class Parameters(BaseModel): - aspectRatio: Optional[str] = Field(None, examples=['16:9']) - durationSeconds: Optional[int] = None - enhancePrompt: Optional[bool] = None - negativePrompt: Optional[str] = None - personGeneration: Optional[PersonGeneration] = None - sampleCount: Optional[int] = None - seed: Optional[int] = None - storageUri: Optional[str] = Field( - None, description='Optional Cloud Storage URI to upload the video' - ) - - -class VeoRequestBody(BaseModel): - instances: Optional[List[Instance]] = None - parameters: Optional[Parameters] = None - - -class WorkflowRunStatus(str, Enum): - WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' - WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' - WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' - - -class ActionJobResult(BaseModel): - action_job_id: Optional[str] = Field( - None, description='Identifier of the job this result belongs to' - ) - action_run_id: Optional[str] = Field( - None, description='Identifier of the run this result belongs to' - ) - author: Optional[str] = Field(None, description='The author of the commit') - avg_vram: Optional[int] = Field( - None, description='The average VRAM used by the job' - ) - branch_name: Optional[str] = Field( - None, description='Name of the relevant git branch' - ) - comfy_run_flags: Optional[str] = Field( - None, description='The comfy run flags. E.g. `--low-vram`' - ) - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_id: Optional[str] = Field(None, description='The ID of the commit') - commit_message: Optional[str] = Field(None, description='The message of the commit') - commit_time: Optional[int] = Field( - None, description='The Unix timestamp when the commit was made' - ) - cuda_version: Optional[str] = Field(None, description='CUDA version used') - end_time: Optional[int] = Field( - None, description='The end time of the job as a Unix timestamp.' - ) - git_repo: Optional[str] = Field(None, description='The repository name') - id: Optional[UUID] = Field(None, description='Unique identifier for the job result') - job_trigger_user: Optional[str] = Field( - None, description='The user who triggered the job.' - ) - machine_stats: Optional[MachineStats] = None - operating_system: Optional[str] = Field(None, description='Operating system used') - peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') - pr_number: Optional[str] = Field(None, description='The pull request number') - python_version: Optional[str] = Field(None, description='PyTorch version used') - pytorch_version: Optional[str] = Field(None, description='PyTorch version used') - start_time: Optional[int] = Field( - None, description='The start time of the job as a Unix timestamp.' 
- ) - status: Optional[WorkflowRunStatus] = None - storage_file: Optional[StorageFile] = None - workflow_name: Optional[str] = Field(None, description='Name of the workflow') - - -class NodeVersion(BaseModel): - changelog: Optional[str] = Field( - None, description='Summary of changes made in this version' - ) - comfy_node_extract_status: Optional[str] = Field( - None, description='The status of comfy node extraction process.' - ) - createdAt: Optional[datetime] = Field( - None, description='The date and time the version was created.' - ) - dependencies: Optional[List[str]] = Field( - None, description='A list of pip dependencies required by the node.' - ) - deprecated: Optional[bool] = Field( - None, description='Indicates if this version is deprecated.' - ) - downloadUrl: Optional[str] = Field( - None, description='[Output Only] URL to download this version of the node' - ) - id: Optional[str] = None - node_id: Optional[str] = Field( - None, description='The unique identifier of the node.' - ) - status: Optional[NodeVersionStatus] = None - status_reason: Optional[str] = Field( - None, description='The reason for the status change.' - ) - version: Optional[str] = Field( - None, - description='The version identifier, following semantic versioning. Must be unique for the node.', - ) - - -class PublisherMember(BaseModel): - id: Optional[str] = Field( - None, description='The unique identifier for the publisher member.' - ) - role: Optional[str] = Field( - None, description='The role of the user in the publisher.' - ) - user: Optional[PublisherUser] = None - - -class StripeCharge(BaseModel): - amount: Optional[int] = None - amount_captured: Optional[int] = None - amount_refunded: Optional[int] = None - application: Optional[str] = None - application_fee: Optional[str] = None - application_fee_amount: Optional[int] = None - balance_transaction: Optional[str] = None - billing_details: Optional[StripeBillingDetails] = None - calculated_statement_descriptor: Optional[str] = None - captured: Optional[bool] = None - created: Optional[int] = None - currency: Optional[str] = None - customer: Optional[str] = None - description: Optional[str] = None - destination: Optional[Any] = None - dispute: Optional[Any] = None - disputed: Optional[bool] = None - failure_balance_transaction: Optional[Any] = None - failure_code: Optional[Any] = None - failure_message: Optional[Any] = None - fraud_details: Optional[Dict[str, Any]] = None - id: Optional[str] = None - invoice: Optional[Any] = None - livemode: Optional[bool] = None - metadata: Optional[Dict[str, Any]] = None - object: Optional[Object] = None - on_behalf_of: Optional[Any] = None - order: Optional[Any] = None - outcome: Optional[StripeOutcome] = None - paid: Optional[bool] = None - payment_intent: Optional[str] = None - payment_method: Optional[str] = None - payment_method_details: Optional[StripePaymentMethodDetails] = None - radar_options: Optional[Dict[str, Any]] = None - receipt_email: Optional[str] = None - receipt_number: Optional[str] = None - receipt_url: Optional[str] = None - refunded: Optional[bool] = None - refunds: Optional[StripeRefundList] = None - review: Optional[Any] = None - shipping: Optional[StripeShipping] = None - source: Optional[Any] = None - source_transfer: Optional[Any] = None - statement_descriptor: Optional[Any] = None - statement_descriptor_suffix: Optional[Any] = None - status: Optional[str] = None - transfer_data: Optional[Any] = None - transfer_group: Optional[Any] = None - - -class StripeChargeList(BaseModel): - data: 
Optional[List[StripeCharge]] = None - has_more: Optional[bool] = None - object: Optional[str] = None - total_count: Optional[int] = None - url: Optional[str] = None - - -class StripePaymentIntent(BaseModel): - amount: Optional[int] = None - amount_capturable: Optional[int] = None - amount_details: Optional[StripeAmountDetails] = None - amount_received: Optional[int] = None - application: Optional[str] = None - application_fee_amount: Optional[int] = None - automatic_payment_methods: Optional[Any] = None - canceled_at: Optional[int] = None - cancellation_reason: Optional[str] = None - capture_method: Optional[str] = None - charges: Optional[StripeChargeList] = None - client_secret: Optional[str] = None - confirmation_method: Optional[str] = None - created: Optional[int] = None - currency: Optional[str] = None - customer: Optional[str] = None - description: Optional[str] = None - id: Optional[str] = None - invoice: Optional[str] = None - last_payment_error: Optional[Any] = None - latest_charge: Optional[str] = None - livemode: Optional[bool] = None - metadata: Optional[Dict[str, Any]] = None - next_action: Optional[Any] = None - object: Optional[Object2] = None - on_behalf_of: Optional[Any] = None - payment_method: Optional[str] = None - payment_method_configuration_details: Optional[Any] = None - payment_method_options: Optional[StripePaymentMethodOptions] = None - payment_method_types: Optional[List[str]] = None - processing: Optional[Any] = None - receipt_email: Optional[str] = None - review: Optional[Any] = None - setup_future_usage: Optional[Any] = None - shipping: Optional[StripeShipping] = None - source: Optional[Any] = None - statement_descriptor: Optional[Any] = None - statement_descriptor_suffix: Optional[Any] = None - status: Optional[str] = None - transfer_data: Optional[Any] = None - transfer_group: Optional[Any] = None - - -class Publisher(BaseModel): - createdAt: Optional[datetime] = Field( - None, description='The date and time the publisher was created.' - ) - description: Optional[str] = None - id: Optional[str] = Field( - None, - description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", - ) - logo: Optional[str] = Field(None, description="URL to the publisher's logo.") - members: Optional[List[PublisherMember]] = Field( - None, description='A list of members in the publisher.' - ) - name: Optional[str] = None - source_code_repo: Optional[str] = None - status: Optional[PublisherStatus] = None - support: Optional[str] = None - website: Optional[str] = None - - -class Data8(BaseModel): - object: Optional[StripePaymentIntent] = None - - -class StripeEvent(BaseModel): - api_version: Optional[str] = None - created: Optional[int] = None - data: Data8 - id: str - livemode: Optional[bool] = None - object: Object1 - pending_webhooks: Optional[int] = None - request: Optional[StripeRequestInfo] = None - type: Type7 - - -class Node(BaseModel): - author: Optional[str] = None - category: Optional[str] = Field(None, description='The category of the node.') - description: Optional[str] = None - downloads: Optional[int] = Field( - None, description='The number of downloads of the node.' - ) - icon: Optional[str] = Field(None, description="URL to the node's icon.") - id: Optional[str] = Field(None, description='The unique identifier of the node.') - latest_version: Optional[NodeVersion] = None - license: Optional[str] = Field( - None, description="The path to the LICENSE file in the node's repository." 
- ) - name: Optional[str] = Field(None, description='The display name of the node.') - publisher: Optional[Publisher] = None - rating: Optional[float] = Field(None, description='The average rating of the node.') - repository: Optional[str] = Field(None, description="URL to the node's repository.") - status: Optional[NodeStatus] = None - status_detail: Optional[str] = Field( - None, description='The status detail of the node.' - ) - tags: Optional[List[str]] = None - translations: Optional[Dict[str, Dict[str, Any]]] = None
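
End-of-series note (illustrative, not code shipped by these patches): PATCH 37/38 extends the response handling so a generation result may arrive either as a short-lived URL or as inline base64 data (b64_json). The self-contained sketch below condenses that branch from the diffs above into a single helper; the data[0].url / data[0].b64_json attribute names are taken from the patch, everything else (the helper name, the getattr-based access) is a hypothetical simplification.

    # Simplified sketch of the URL-or-base64 branch from PATCH 37/38.
    # Assumes the response entry exposes .url and .b64_json as in the diff.
    import base64
    import io

    import numpy as np
    import requests
    import torch
    from PIL import Image

    def response_entry_to_tensor(data_entry):
        b64_data = getattr(data_entry, "b64_json", None)
        image_url = getattr(data_entry, "url", None)
        if not b64_data and not image_url:
            raise Exception("No image was generated in the response")

        if b64_data:
            # Inline payload: decode the base64 string directly.
            img = Image.open(io.BytesIO(base64.b64decode(b64_data)))
        else:
            # Short-lived URL: download it immediately.
            resp = requests.get(image_url, timeout=60)
            if resp.status_code != 200:
                raise Exception("Failed to download the image")
            img = Image.open(io.BytesIO(resp.content))

        img = img.convert("RGB")  # Ensure RGB format
        # Normalize to float32 in [0, 1] and add a batch dimension (ComfyUI layout).
        arr = np.array(img).astype(np.float32) / 255.0
        return torch.from_numpy(arr)[None,]
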