Commit

documentation

pedro-devv committed Apr 18, 2024
1 parent f070d18 commit 95c6dd3
Showing 3 changed files with 91 additions and 44 deletions.
5 changes: 0 additions & 5 deletions crates/edgen_rt_image_generation_candle/src/lib.rs
@@ -159,11 +159,6 @@ fn generate_image(

device.set_seed(args.seed.unwrap_or(random::<u64>()))?;

// let which = match sd_version {
// StableDiffusionVersion::Xl | StableDiffusionVersion::Turbo => vec![true, false],
// _ => vec![true],
// };

let which = if model.clip2_weights.is_some() {
vec![true, false]
} else {
53 changes: 14 additions & 39 deletions crates/edgen_server/src/image_generation.rs
@@ -1,3 +1,4 @@
use crate::audio::ChatCompletionError;

Check warning on line 1 in crates/edgen_server/src/image_generation.rs (GitHub Actions: Linux-x86_64, macOS-universal, Windows-x86_64): unused import: `crate::audio::ChatCompletionError`
use crate::model_descriptor::{ModelDescriptor, ModelDescriptorError, ModelPaths, Quantization};
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
@@ -12,6 +13,8 @@ use thiserror::Error;
use utoipa::ToSchema;

/// A request to generate images for the provided context.
/// This request does not conform to OpenAI's API, which is very bare-bones and lacks many of the
/// parameters that Edgen needs.
///
/// An `axum` handler, [`image_generation`][image_generation], is provided to handle this request.
///
@@ -58,8 +61,11 @@ pub struct CreateImageGenerationRequest<'a> {
pub vae_scale: Option<f64>,
}

/// This response does not conform to OpenAI's API, which returns a URL to the generated image;
/// returning a URL is not possible for Edgen, so the image bytes are returned directly.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ImageGenerationResponse {
/// A vector containing the byte data of the generated images.
pub images: Vec<Vec<u8>>,
}
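
For illustration, here is a minimal client-side sketch of how this struct might be consumed; it is not taken from the Edgen codebase. It assumes the response body is `serde_json`-encoded and that each inner byte vector is an already-encoded image file (for example, PNG), and it redeclares the struct locally so the snippet stands alone.

```rust
// Hypothetical client-side handling of an ImageGenerationResponse body; the
// struct is redeclared here so the snippet is self-contained.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ImageGenerationResponse {
    /// Byte data of each generated image.
    images: Vec<Vec<u8>>,
}

fn save_images(json_body: &str) -> Result<(), Box<dyn std::error::Error>> {
    let response: ImageGenerationResponse = serde_json::from_str(json_body)?;
    for (i, bytes) in response.images.iter().enumerate() {
        // Assumes each byte vector is an already-encoded image file (e.g. PNG).
        std::fs::write(format!("image-{i}.png"), bytes)?;
    }
    Ok(())
}
```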

@@ -85,6 +91,14 @@ impl IntoResponse for ImageGenerationError {
}
}

/// POST `/v1/image/generations`: generate images for the provided parameters
///
/// This endpoint does not conform to OpenAI's API: that API is very bare-bones, lacks many of the
/// parameters Edgen needs, and returns a URL, which Edgen cannot do.
///
/// On failure, may raise a `500 Internal Server Error` with a JSON-encoded [`ImageGenerationError`]
/// to the peer.
#[utoipa::path(
post,
path = "/image/generations",
@@ -97,38 +111,6 @@ responses(
pub async fn generate_image(
Json(req): Json<CreateImageGenerationRequest<'_>>,
) -> Result<impl IntoResponse, ImageGenerationError> {
// let mut unet = Model::new(
// ModelKind::ImageDiffusion,
// "unet/diffusion_pytorch_model.fp16.safetensors",
// "stabilityai/stable-diffusion-2-1",
// &PathBuf::from(settings::image_generation_dir().await),
// );
// unet.preload(Endpoint::ImageGeneration).await?;
//
// let mut vae = Model::new(
// ModelKind::ImageDiffusion,
// "vae/diffusion_pytorch_model.fp16.safetensors",
// "stabilityai/stable-diffusion-2-1",
// &PathBuf::from(settings::image_generation_dir().await),
// );
// vae.preload(Endpoint::ImageGeneration).await?;
//
// let mut tokenizer = Model::new(
// ModelKind::ImageDiffusion,
// "tokenizer.json",
// "openai/clip-vit-base-patch32",
// &PathBuf::from(settings::image_generation_dir().await),
// );
// tokenizer.preload(Endpoint::ImageGeneration).await?;
//
// let mut clip = Model::new(
// ModelKind::ImageDiffusion,
// "text_encoder/model.fp16.safetensors",
// "stabilityai/stable-diffusion-2-1",
// &PathBuf::from(settings::image_generation_dir().await),
// );
// clip.preload(Endpoint::ImageGeneration).await?;

let descriptor = crate::model_descriptor::get(req.model.as_ref())?;
let model_files;
let default_steps;
@@ -164,13 +146,6 @@ pub async fn generate_image(
let endpoint = CandleImageGenerationEndpoint {};
let images = endpoint
.generate_image(
// ModelFiles {
// tokenizer: tokenizer.file_path()?,
// clip_weights: clip.file_path()?,
// clip2_weights: None,
// vae_weights: vae.file_path()?,
// unet_weights: unet.file_path()?,
// },
model_files,
ImageGenerationArgs {
prompt: req.prompt.to_string(),
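
To make the request and error contract above concrete, here is a hypothetical caller built with `reqwest` (blocking, with the `json` feature) and `serde_json`; neither the helper nor its names come from the Edgen codebase. It sends the documented JSON body and, on a non-success status such as the `500 Internal Server Error` described in the handler's doc comment, reads the JSON-encoded error body instead of an image payload.

```rust
// Hypothetical caller of POST /v1/image/generations; illustrates the success
// and error paths described in the handler's doc comment above.
use serde_json::json;

fn request_image() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let response = client
        .post("http://localhost:33322/v1/image/generations")
        .header("Authorization", "Bearer no-key-required")
        .json(&json!({
            "model": "stable-diffusion-2-1",
            "prompt": "A rusty robot"
        }))
        .send()?;

    if response.status().is_success() {
        // The body can be decoded as in the ImageGenerationResponse sketch above.
        let body = response.text()?;
        println!("received {} bytes of JSON", body.len());
    } else {
        // On failure, the server replies with a JSON-encoded ImageGenerationError.
        let status = response.status();
        let error_body = response.text()?;
        eprintln!("request failed with {status}: {error_body}");
    }
    Ok(())
}
```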
77 changes: 77 additions & 0 deletions docs/src/app/api-reference/image/page.mdx
@@ -0,0 +1,77 @@
export const metadata = {
title: 'Image',
description: 'Generate images',
}

# Image

Generate images from text. {{ className: 'lead' }}

---

<Row>
<Col>

## Create image {{tag:'POST', label:'http://localhost:33322/v1/image/generations'}}

Given a text prompt, generate 1 or more images according to the prompt.

### Required attributes

<Properties>
<Property name="prompt" type="string">
A description of the images to be generated.
</Property>
</Properties>

<Properties>
<Property name="model" type="string">
The model used for image generation. (WARNING: at the moment, only "stable-diffusion-2-1" is allowed.)
<ul>
<li>
If the model name is "default", the chat model from the configuration is used (see [Documentation &raquo; Configuration](/documentation/configuration) for details).
</li>
<li>
If the model name is a valid model name recognized by Edgen, that model is used.
</li>
</ul>

</Property>
</Properties>

</Col>
<Col sticky>

<ButtonRow types={["Default"]}>

<div>
<CodeGroup title="Request" tag="POST" label="/v1/chat/completions">

```bash {{ title: 'cURL' }}
curl http://localhost:33322/v1/image/generations \
-H "Content-Type: application/json" \
-H "Authorization: Bearer no-key-required" \
-d '{
"model": "stable-diffusion-2-1",
"prompt": "A rusty robot"
}'
```

```python
# The Python API for this endpoint is not available yet.
```

```ts
// The TypeScript API for this endpoint is not available yet.
```

</CodeGroup>

```json {{ title: 'Response' }}
{"images": [[123, 234, ..., 231, 213]]}
```
</div>
</ButtonRow>

</Col>
</Row>
