Created
November 27, 2025 03:26
-
-
Save sonic74/423c03483fbc13e7fd99ac97bcec8ff8 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import io | |
| import os | |
| import requests | |
| import torch | |
| from diffusers import Flux2Pipeline, Flux2Transformer2DModel | |
# Hugging Face Hub repo holding the bitsandbytes 4-bit quantized FLUX.2-dev checkpoint.
repo_id = "diffusers/FLUX.2-dev-bnb-4bit"
# Compute dtype for the pipeline; bfloat16 halves memory vs. float32.
torch_dtype = torch.bfloat16
# Inference device — this script assumes a CUDA-capable GPU.
device = "cuda"
def remote_text_encoder(prompts: str | list[str]) -> torch.Tensor:
    """Encode prompt(s) via the hosted FLUX.2 remote text-encoder endpoint.

    Offloads text encoding to a Hugging Face service so the large text
    encoder never has to be loaded locally. Requires the ``HF_TOKEN``
    environment variable to be set.

    Args:
        prompts: A single prompt string, or a list/tuple of prompt strings.

    Returns:
        The prompt-embedding tensor moved to ``device``; for a list/tuple
        input the per-prompt embeddings are concatenated along dim 0.

    Raises:
        requests.HTTPError: If the endpoint responds with an error status.
        KeyError: If ``HF_TOKEN`` is not set in the environment.
    """

    def _encode_single(prompt: str) -> torch.Tensor:
        response = requests.post(
            "https://remote-text-encoder-flux-2.huggingface.co/predict",
            json={"prompt": prompt},
            headers={
                "Authorization": f"Bearer {os.environ['HF_TOKEN']}",
                "Content-Type": "application/json",
            },
            timeout=120,  # don't hang forever on a stalled endpoint
        )
        # raise_for_status() instead of `assert`: asserts are stripped under
        # `python -O`, which would silently pass corrupt responses through.
        response.raise_for_status()
        # torch.load uses pickle under the hood; weights_only=True blocks
        # arbitrary-code execution from the remote payload (plain tensors
        # load fine with it).
        return torch.load(io.BytesIO(response.content), weights_only=True)

    if isinstance(prompts, (list, tuple)):
        embeds = [_encode_single(p) for p in prompts]
        # Move the batch to the target device too — the original only did
        # this for the single-prompt path.
        return torch.cat(embeds, dim=0).to(device)
    return _encode_single(prompts).to(device)
# BUG FIX: the original referenced an undefined `transformer_id` (NameError
# at runtime); the transformer lives in the same quantized repo, so load it
# from `repo_id`. device_map="cpu" keeps the weights in system RAM so they
# can be streamed to the GPU during inference via group offloading.
transformer = Flux2Transformer2DModel.from_pretrained(
    repo_id, subfolder="transformer", torch_dtype=torch_dtype, device_map="cpu"
)
# Build the pipeline without a local text encoder (prompts are embedded by
# the remote service) and plug in the CPU-resident transformer loaded above.
pipe = Flux2Pipeline.from_pretrained(
    repo_id,
    text_encoder=None,
    transformer=transformer,
    torch_dtype=torch_dtype,
)
# Stream transformer weights between CPU and GPU at leaf-module granularity
# so the full model never has to fit in VRAM at once. use_stream presumably
# overlaps the transfers with compute on a separate CUDA stream — see the
# diffusers group-offloading docs to confirm.
pipe.transformer.enable_group_offload(
    onload_device=device,
    offload_device="cpu",
    offload_type="leaf_level",
    use_stream=True,
    # low_cpu_mem_usage=True # uncomment for lower RAM usage
)
# Move the remaining pipeline components to the GPU; the group-offloaded
# transformer manages its own device placement.
pipe.to(device)
# Embed the prompt via the remote encoder, then run the denoising loop locally.
prompt = "a photo of a forest with mist swirling around the tree trunks. The word 'FLUX.2' is painted over it in big, red brush strokes with visible texture"
prompt_embeds = remote_text_encoder(prompt)

# Fixed seed so the sample is reproducible across runs.
rng = torch.Generator(device=device).manual_seed(42)

output = pipe(
    prompt_embeds=prompt_embeds,
    generator=rng,
    height=1024,
    width=1024,
    num_inference_steps=50,
    guidance_scale=4,
)
image = output.images[0]
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment