Issue: Specified workflow does not appear to be an API workflow json

Hi, I’m getting Specified workflow does not appear to be an API workflow JSON when I try to run a workflow. I’m using this workflow ComfyUI-FramePackWrapper/example_workflows/framepack_hv_example.json at main · kijai/ComfyUI-FramePackWrapper · GitHub and its correct

from beam import Image, endpoint, Output, Volume
import subprocess

def setup_comfyui_environment():
    image = (
        Image()
        .add_commands(["apt update && apt install git -y"])
        .add_python_packages(
            [
                "fastapi[standard]==0.115.4",
                "comfy-cli==1.3.5",
                "huggingface_hub[hf_transfer]==0.26.2",
                "opencv-python",
            ]
        )
        .add_commands(
            [
                "apt update && apt install -y libgl1 libglib2.0-0",
            ]
        )
        .add_commands(
            [
                "comfy --skip-prompt install --nvidia --version 0.3.10",
            ]
        )
    )

    # Install ComfyUI-FramePackWrapper custom node
    image = image.add_commands(
        [
            "rm -rf /root/comfy/ComfyUI/custom_nodes/ComfyUI-FramePackWrapper || true",
            "cd /root/comfy/ComfyUI/custom_nodes && "
            "git clone https://github.com/kijai/ComfyUI-FramePackWrapper.git && "
            "cd ComfyUI-FramePackWrapper && "
            "pip install -r requirements.txt",
        ]
    )

    # Install VideoHelperSuite
    image = image.add_commands(
        [
            "cd /root/comfy/ComfyUI/custom_nodes && "
            "git clone https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite.git && "
            "cd ComfyUI-VideoHelperSuite && "
            "pip install -r requirements.txt"
        ]
    )

    # Install KJNodes
    image = image.add_commands(
        [
            "cd /root/comfy/ComfyUI/custom_nodes && "
            "git clone https://github.com/kijai/ComfyUI-KJNodes.git && "
            "cd ComfyUI-KJNodes && "
            "pip install -r requirements.txt"
        ]
    )

    # Install ComfyUI-essentials
    image = image.add_commands(
        [
            "cd /root/comfy/ComfyUI/custom_nodes && "
            "git clone https://github.com/cubiq/ComfyUI_essentials.git"
        ]
    )

    return image


image = setup_comfyui_environment()

def download_models():
    """Download all necessary models for FramePack workflow"""
    from huggingface_hub import snapshot_download, hf_hub_download
    import os

    os.makedirs("/root/comfy/ComfyUI/models/diffusion_models", exist_ok=True)
    os.makedirs("/root/comfy/ComfyUI/models/text_encoders", exist_ok=True)
    os.makedirs("/root/comfy/ComfyUI/models/clip_vision", exist_ok=True)
    os.makedirs("/root/comfy/ComfyUI/models/vae", exist_ok=True)


    # 1. Diffusion model 
    diffusion_model = hf_hub_download(
        repo_id="Kijai/HunyuanVideo_comfy",
        filename="FramePackI2V_HY_fp8_e4m3fn.safetensors",
        cache_dir="/comfy-cache",
    )
    subprocess.run(
        f"ln -s {diffusion_model} /root/comfy/ComfyUI/models/diffusion_models/FramePackI2V_HY_fp8_e4m3fn.safetensors",
        shell=True,
        check=True,
    )

    # 2. CLIP Vision model
    clip_vision = hf_hub_download(
        repo_id="Comfy-Org/sigclip_vision_384",
        filename="sigclip_vision_patch14_384.safetensors",
        cache_dir="/comfy-cache",
    )
    subprocess.run(
        f"ln -s {clip_vision} /root/comfy/ComfyUI/models/clip_vision/sigclip_vision_patch14_384.safetensors",
        shell=True,
        check=True,
    )

    # 3. Text Encoders
    clip_l = hf_hub_download(
        repo_id="Comfy-Org/HunyuanVideo_repackaged",
        filename="split_files/text_encoders/clip_l.safetensors",
        cache_dir="/comfy-cache",
    )
    subprocess.run(
        f"ln -s {clip_l} /root/comfy/ComfyUI/models/text_encoders/clip_l.safetensors",
        shell=True,
        check=True,
    )

    llava_llama3 = hf_hub_download(
        repo_id="Comfy-Org/HunyuanVideo_repackaged",
        filename="split_files/text_encoders/llava_llama3_fp16.safetensors",
        cache_dir="/comfy-cache",
    )
    subprocess.run(
        f"ln -s {llava_llama3} /root/comfy/ComfyUI/models/text_encoders/llava_llama3_fp16.safetensors",
        shell=True,
        check=True,
    )

    # 4. VAE model
    vae = hf_hub_download(
        repo_id="Comfy-Org/HunyuanVideo_repackaged",
        filename="split_files/vae/hunyuan_video_vae_bf16.safetensors",
        cache_dir="/comfy-cache",
    )
    subprocess.run(
        f"ln -s {vae} /root/comfy/ComfyUI/models/vae/hunyuan_video_vae_bf16.safetensors",
        shell=True,
        check=True,
    )

    return {"status": "Models downloaded and linked successfully"}


@endpoint(
    name="comfy",
    image=image,
    on_start=download_models,
    cpu=12,
    memory="32Gi",
    gpu="A10G",
)
def handler():
    import subprocess
    import json
    from pathlib import Path
    import uuid
    import requests

    WORKFLOW_TEMPLATE = Path(__file__).parent / "workflow_api.json"
  

    # Load the workflow template
    workflow_data = json.loads(WORKFLOW_TEMPLATE.read_text())

    nodes = workflow_data.get("nodes", [])

    print("Nodes in workflow:", nodes)

    try:
        cmd = f"comfy run --workflow /mnt/code/workflow_api.json --wait --timeout 1800 --verbose"
        subprocess.run(cmd, shell=True, check=True)
    except subprocess.CalledProcessError as e:
        return "Error during inference: " + str(e)