10 changes: 10 additions & 0 deletions packages/inference/src/lib/getProviderHelper.ts
@@ -28,6 +28,8 @@ import type {
	ImageToImageTaskHelper,
	ImageToTextTaskHelper,
	ImageToVideoTaskHelper,
	ImageTextToImageTaskHelper,
	ImageTextToVideoTaskHelper,
	ObjectDetectionTaskHelper,
	QuestionAnsweringTaskHelper,
	SentenceSimilarityTaskHelper,
@@ -276,6 +278,14 @@ export function getProviderHelper(
	provider: InferenceProviderOrPolicy,
	task: "image-to-video"
): ImageToVideoTaskHelper & TaskProviderHelper;
export function getProviderHelper(
	provider: InferenceProviderOrPolicy,
	task: "image-text-to-image"
): ImageTextToImageTaskHelper & TaskProviderHelper;
export function getProviderHelper(
	provider: InferenceProviderOrPolicy,
	task: "image-text-to-video"
): ImageTextToVideoTaskHelper & TaskProviderHelper;
export function getProviderHelper(
	provider: InferenceProviderOrPolicy,
	task: "sentence-similarity"
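To illustrate what the new overloads buy callers, here is a minimal sketch; the provider id is only an example, and whether a provider actually supports the task is resolved at runtime, not by these type signatures:

```ts
// Sketch: imagined to live alongside the task modules in this PR, hence the import path.
import { getProviderHelper } from "../../lib/getProviderHelper.js";

// With the new overloads, the returned helper is fully typed for the task.
const helper = getProviderHelper("fal-ai", "image-text-to-video");
// `helper` is ImageTextToVideoTaskHelper & TaskProviderHelper, so its
// preparePayloadAsync and getResponse are checked against the new task shapes.
```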
16 changes: 16 additions & 0 deletions packages/inference/src/providers/providerHelper.ts
@@ -19,6 +19,8 @@ import type {
	ImageToTextInput,
	ImageToTextOutput,
	ImageToVideoInput,
	ImageTextToImageInput,
	ImageTextToVideoInput,
	ObjectDetectionInput,
	ObjectDetectionOutput,
	QuestionAnsweringInput,
@@ -54,6 +56,8 @@ import { toArray } from "../utils/toArray.js";
import type { ImageToImageArgs } from "../tasks/cv/imageToImage.js";
import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition.js";
import type { ImageToVideoArgs } from "../tasks/cv/imageToVideo.js";
import type { ImageTextToImageArgs } from "../tasks/cv/imageTextToImage.js";
import type { ImageTextToVideoArgs } from "../tasks/cv/imageTextToVideo.js";
import type { ImageSegmentationArgs } from "../tasks/cv/imageSegmentation.js";

/**
@@ -159,6 +163,18 @@ export interface ImageToVideoTaskHelper {
	preparePayloadAsync(args: ImageToVideoArgs): Promise<RequestArgs>;
}

export interface ImageTextToImageTaskHelper {
	getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<Blob>;
	preparePayload(params: BodyParams<ImageTextToImageInput & BaseArgs>): Record<string, unknown>;
	preparePayloadAsync(args: ImageTextToImageArgs): Promise<RequestArgs>;
}

export interface ImageTextToVideoTaskHelper {
	getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<Blob>;
	preparePayload(params: BodyParams<ImageTextToVideoInput & BaseArgs>): Record<string, unknown>;
	preparePayloadAsync(args: ImageTextToVideoArgs): Promise<RequestArgs>;
}

export interface ImageSegmentationTaskHelper {
	getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<ImageSegmentationOutput>;
	preparePayload(params: BodyParams<ImageSegmentationInput & BaseArgs>): Record<string, unknown> | BodyInit;
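To make the contract concrete, here is a minimal sketch of a class satisfying the new image-text-to-image interface. The class name, the payload fields, and the assumption that `BodyParams` carries the resolved model id are illustrative; this is not an actual provider implementation from this package:

```ts
import type { ImageTextToImageInput } from "@huggingface/tasks";
import type { BaseArgs, BodyParams, RequestArgs } from "../types.js"; // assumed path
import type { ImageTextToImageArgs } from "../tasks/cv/imageTextToImage.js";
import type { ImageTextToImageTaskHelper } from "./providerHelper.js";

// Hypothetical helper; real providers wire these methods into their own
// TaskProviderHelper subclasses elsewhere in this package.
class ExampleImageTextToImageHelper implements ImageTextToImageTaskHelper {
	preparePayload(params: BodyParams<ImageTextToImageInput & BaseArgs>): Record<string, unknown> {
		// Drop credentials from the JSON body (assumption: the token travels in headers).
		const { accessToken, ...payload } = params.args;
		return { ...payload, model: params.model };
	}
	async preparePayloadAsync(args: ImageTextToImageArgs): Promise<RequestArgs> {
		// A real implementation would, e.g., base64-encode the input image here.
		return args as unknown as RequestArgs;
	}
	async getResponse(response: unknown): Promise<Blob> {
		if (response instanceof Blob) {
			return response;
		}
		throw new Error("Expected a binary image payload");
	}
}
```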
5 changes: 5 additions & 0 deletions packages/inference/src/snippets/getInferenceSnippets.ts
@@ -96,7 +96,10 @@ const HF_PYTHON_METHODS: Partial<Record<WidgetType, string>> = {
"image-classification": "image_classification",
"image-segmentation": "image_segmentation",
"image-to-image": "image_to_image",
"image-to-video": "image_to_video",
"image-to-text": "image_to_text",
"image-text-to-image": "image_text_to_image",
"image-text-to-video": "image_text_to_video",
"object-detection": "object_detection",
"question-answering": "question_answering",
"sentence-similarity": "sentence_similarity",
@@ -390,7 +393,9 @@ const snippets: Partial<
"fill-mask": snippetGenerator("basic"),
"image-classification": snippetGenerator("basicImage"),
"image-segmentation": snippetGenerator("basicImage"),
"image-text-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
"image-text-to-text": snippetGenerator("conversational"),
"image-text-to-video": snippetGenerator("imageToVideo", prepareImageToImageInput),
"image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
"image-to-text": snippetGenerator("basicImage"),
"image-to-video": snippetGenerator("imageToVideo", prepareImageToImageInput),
22 changes: 22 additions & 0 deletions packages/inference/src/tasks/cv/imageTextToImage.ts
@@ -0,0 +1,22 @@
import type { ImageTextToImageInput } from "@huggingface/tasks";
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
import { getProviderHelper } from "../../lib/getProviderHelper.js";
import type { BaseArgs, Options } from "../../types.js";
import { innerRequest } from "../../utils/request.js";

export type ImageTextToImageArgs = BaseArgs & ImageTextToImageInput;

/**
 * This task takes an image and text input and outputs a new generated image.
 * Recommended model: black-forest-labs/FLUX.2-dev
 */
export async function imageTextToImage(args: ImageTextToImageArgs, options?: Options): Promise<Blob> {
	const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
	const providerHelper = getProviderHelper(provider, "image-text-to-image");
	const payload = await providerHelper.preparePayloadAsync(args);
	const { data: res, requestContext } = await innerRequest<Blob>(payload, providerHelper, {
		...options,
		task: "image-text-to-image",
	});
	return providerHelper.getResponse(res, requestContext.url, requestContext.info.headers as Record<string, string>);
}
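Putting the new entry point together, a minimal calling sketch; the token and image URL are placeholders, and the model id is the doc comment's recommendation:

```ts
import { imageTextToImage } from "@huggingface/inference";

const edited = await imageTextToImage({
	accessToken: "hf_...", // placeholder token
	model: "black-forest-labs/FLUX.2-dev",
	inputs: await (await fetch("https://example.com/cat.png")).blob(),
	parameters: { prompt: "Turn the cat into a tiger." },
});
// `edited` is a Blob containing the generated image.
```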
22 changes: 22 additions & 0 deletions packages/inference/src/tasks/cv/imageTextToVideo.ts
@@ -0,0 +1,22 @@
import type { ImageTextToVideoInput } from "@huggingface/tasks";
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
import { getProviderHelper } from "../../lib/getProviderHelper.js";
import type { BaseArgs, Options } from "../../types.js";
import { innerRequest } from "../../utils/request.js";

export type ImageTextToVideoArgs = BaseArgs & ImageTextToVideoInput;

/**
 * This task takes an image and text input and outputs a generated video.
 * Recommended model: Lightricks/LTX-Video
 */
export async function imageTextToVideo(args: ImageTextToVideoArgs, options?: Options): Promise<Blob> {
	const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
	const providerHelper = getProviderHelper(provider, "image-text-to-video");
	const payload = await providerHelper.preparePayloadAsync(args);
	const { data: res, requestContext } = await innerRequest<Blob>(payload, providerHelper, {
		...options,
		task: "image-text-to-video",
	});
	return providerHelper.getResponse(res, requestContext.url, requestContext.info.headers as Record<string, string>);
}
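And the same flow for the video variant, here as a Node script; the file names and token are placeholders, and this assumes the video input schema mirrors the image-text-to-image one defined later in this PR:

```ts
import { readFile, writeFile } from "node:fs/promises";
import { imageTextToVideo } from "@huggingface/inference";

const video = await imageTextToVideo({
	accessToken: "hf_...", // placeholder token
	model: "Lightricks/LTX-Video", // the doc comment's recommendation
	inputs: new Blob([await readFile("cat.png")]),
	parameters: { prompt: "The cat starts to dance" },
});
await writeFile("cat-dance.mp4", Buffer.from(await video.arrayBuffer()));
```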
2 changes: 2 additions & 0 deletions packages/inference/src/tasks/index.ts
@@ -14,6 +14,8 @@ export * from "./cv/imageSegmentation.js";
export * from "./cv/imageToImage.js";
export * from "./cv/imageToText.js";
export * from "./cv/imageToVideo.js";
export * from "./cv/imageTextToImage.js";
export * from "./cv/imageTextToVideo.js";
export * from "./cv/objectDetection.js";
export * from "./cv/textToImage.js";
export * from "./cv/textToVideo.js";
8 changes: 8 additions & 0 deletions packages/tasks/src/pipelines.ts
@@ -557,6 +557,14 @@ export const PIPELINE_DATA = {
name: "Image-Text-to-Text",
modality: "multimodal",
},
"image-text-to-image": {
name: "Image-Text-to-Image",
modality: "multimodal",
},
"image-text-to-video": {
name: "Image-Text-to-Video",
modality: "multimodal",
},
"visual-question-answering": {
name: "Visual Question Answering",
subtasks: [
12 changes: 12 additions & 0 deletions packages/tasks/src/snippets/inputs.ts
@@ -94,6 +94,16 @@ const inputsImageToVideo = () => `{
"prompt": "The cat starts to dance"
}`;

const inputsImageTextToImage = () => `{
"image": "cat.png",
"prompt": "Turn the cat into a tiger."
}`;

const inputsImageTextToVideo = () => `{
"image": "cat.png",
"prompt": "The cat starts to dance"
}`;

const inputsImageSegmentation = () => `"cats.jpg"`;

const inputsObjectDetection = () => `"cats.jpg"`;
@@ -130,6 +140,8 @@ const modelInputSnippets: {
"image-to-text": inputsImageToText,
"image-to-image": inputsImageToImage,
"image-to-video": inputsImageToVideo,
"image-text-to-image": inputsImageTextToImage,
"image-text-to-video": inputsImageTextToVideo,
"image-segmentation": inputsImageSegmentation,
"object-detection": inputsObjectDetection,
"question-answering": inputsQuestionAnswering,
73 changes: 73 additions & 0 deletions packages/tasks/src/tasks/image-text-to-image/about.md
@@ -0,0 +1,73 @@
## Use Cases

### Instruction-based Image Editing

Image-text-to-image models can be used to edit images based on natural language instructions. For example, you can provide an image of a summer landscape and the instruction "Make it winter, add snow" to generate a winter version of the same scene.

### Style Transfer

These models can apply artistic styles or transformations to images based on text descriptions. For instance, you can transform a photo into a painting style by providing prompts like "Make it look like a Van Gogh painting" or "Convert to watercolor style."

### Image Variations

Generate variations of an existing image by providing different text prompts. This is useful for creative workflows where you want to explore different versions of the same image with specific modifications.

### Guided Image Generation

Use a reference image along with text prompts to guide the generation process. This allows for more controlled image generation compared to text-to-image models alone, as the reference image provides structural guidance.

### Image Inpainting and Outpainting

Fill in missing or masked parts of an image based on text descriptions, or extend an image beyond its original boundaries with text-guided generation.

## Task Variants

### Instruction-based Editing

Models that follow natural language instructions to edit images; they can perform complex edits such as object removal, color changes, and compositional modifications.

### Reference-guided Generation

Models that use a reference image to guide the generation process while incorporating text prompts to control specific attributes or modifications.

### Conditional Image-to-Image

Models that perform specific transformations based on text conditions, such as changing weather conditions, time of day, or seasonal variations.

## Inference

You can use the Diffusers library to interact with image-text-to-image models.

```python
import torch
from diffusers import Flux2Pipeline
from diffusers.utils import load_image

repo_id = "black-forest-labs/FLUX.2-dev"
device = "cuda:0"
torch_dtype = torch.bfloat16

pipe = Flux2Pipeline.from_pretrained(
    repo_id, torch_dtype=torch_dtype
)
# No need for CPU offload on >80 GB VRAM cards such as the H200 or B200;
# call `pipe.to(device)` instead.
pipe.enable_model_cpu_offload()

prompt = "Realistic macro photograph of a hermit crab using a soda can as its shell, partially emerging from the can, captured with sharp detail and natural colors, on a sunlit beach with soft shadows and a shallow depth of field, with blurred ocean waves in the background. The can has the text `BFL Diffusers` on it and it has a color gradient that starts with #FF5733 at the top and transitions to #33FF57 at the bottom."

# cat_image = load_image("https://huggingface.co/spaces/zerogpu-aoti/FLUX.1-Kontext-Dev-fp8-dynamic/resolve/main/cat.png")
image = pipe(
    prompt=prompt,
    # image=[cat_image],  # optional multi-image input
    generator=torch.Generator(device=device).manual_seed(42),
    num_inference_steps=50,
    guidance_scale=4,
).images[0]

image.save("flux2_output.png")
```

## Useful Resources

- [FLUX.2 Model Card](https://huggingface.co/black-forest-labs/FLUX.2-dev)
- [Diffusers documentation on Image-to-Image](https://huggingface.co/docs/diffusers/using-diffusers/img2img)
- [ControlNet for Conditional Image Generation](https://huggingface.co/docs/diffusers/using-diffusers/controlnet)
54 changes: 54 additions & 0 deletions packages/tasks/src/tasks/image-text-to-image/data.ts
@@ -0,0 +1,54 @@
import type { TaskDataCustom } from "../index.js";

const taskData: TaskDataCustom = {
	datasets: [],
	demo: {
		inputs: [
			{
				filename: "image-text-to-image-input.jpeg",
				type: "img",
			},
			{
				label: "Input",
				content: "A city above clouds, pastel colors, Victorian style",
				type: "text",
			},
		],
		outputs: [
			{
				filename: "image-text-to-image-output.png",
				type: "img",
			},
		],
	},
	metrics: [
		{
			description:
				"The Fréchet Inception Distance (FID) measures the distance between the distributions of synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.",
			id: "FID",
		},
		{
			description:
				"CLIP Score measures the similarity between the generated image and the text prompt using CLIP embeddings. A higher score indicates better alignment with the text prompt.",
			id: "CLIP",
		},
	],
	models: [
		{
			description: "A powerful model for image-text-to-image generation.",
			id: "black-forest-labs/FLUX.2-dev",
		},
	],
	spaces: [
		{
			description: "An application for image-text-to-image generation.",
			id: "black-forest-labs/FLUX.2-dev",
		},
	],
	summary:
		"Image-text-to-image models take an image and a text prompt as input and generate a new image based on the reference image and text instructions. These models are useful for image editing, style transfer, image variations, and guided image generation tasks.",
	widgetModels: ["black-forest-labs/FLUX.2-dev"],
	youtubeId: undefined,
};

export default taskData;
75 changes: 75 additions & 0 deletions packages/tasks/src/tasks/image-text-to-image/inference.ts
@@ -0,0 +1,75 @@
/**
 * Inference code generated from the JSON schema spec in ./spec
 *
 * Using src/scripts/inference-codegen
 */
/**
 * Inputs for Image Text To Image inference. Either inputs (image) or prompt (in parameters)
 * must be provided, or both.
 */
export interface ImageTextToImageInput {
	/**
	 * The input image data as a base64-encoded string. If no `parameters` are provided, you can
	 * also provide the image data as a raw bytes payload. Either this or prompt must be
	 * provided.
	 */
	inputs?: Blob;
	/**
	 * Additional inference parameters for Image Text To Image
	 */
	parameters?: ImageTextToImageParameters;
	[property: string]: unknown;
}
/**
 * Additional inference parameters for Image Text To Image
 */
export interface ImageTextToImageParameters {
	/**
	 * For diffusion models. A higher guidance scale value encourages the model to generate
	 * images closely linked to the text prompt at the expense of lower image quality.
	 */
	guidance_scale?: number;
	/**
	 * One prompt to guide what NOT to include in image generation.
	 */
	negative_prompt?: string;
	/**
	 * For diffusion models. The number of denoising steps. More denoising steps usually lead to
	 * a higher quality image at the expense of slower inference.
	 */
	num_inference_steps?: number;
	/**
	 * The text prompt to guide the image generation. Either this or inputs (image) must be
	 * provided.
	 */
	prompt?: string;
	/**
	 * Seed for the random number generator.
	 */
	seed?: number;
	/**
	 * The size in pixels of the output image. This parameter is only supported by some
	 * providers and for specific models. It will be ignored when unsupported.
	 */
	target_size?: TargetSize;
	[property: string]: unknown;
}
/**
 * The size in pixels of the output image. This parameter is only supported by some
 * providers and for specific models. It will be ignored when unsupported.
 */
export interface TargetSize {
	height: number;
	width: number;
	[property: string]: unknown;
}
/**
 * Outputs of inference for the Image Text To Image task
 */
export interface ImageTextToImageOutput {
	/**
	 * The generated image returned as raw bytes in the payload.
	 */
	image: unknown;
	[property: string]: unknown;
}
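As a usage note, a request typed against these generated interfaces could be assembled as below; all values are illustrative, and as the doc comments state, `target_size` is ignored by providers that don't support it:

```ts
const payload: ImageTextToImageInput = {
	inputs: new Blob([/* raw image bytes */]),
	parameters: {
		prompt: "A city above clouds, pastel colors, Victorian style",
		negative_prompt: "blurry, low quality",
		num_inference_steps: 50,
		guidance_scale: 4,
		seed: 42,
		target_size: { width: 1024, height: 1024 },
	},
};
```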