Running Stable Diffusion inference with ONNX models and ONNX Runtime


Using the diffusers pipeline directly:

import os
from diffusers import OnnxStableDiffusionPipeline, OnnxRuntimeModel
from diffusers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, DPMSolverMultistepScheduler
from transformers import CLIPTextModel, CLIPTokenizer

model_dir = "/mnt/f/deep_learning/onnx_model/stable_diffusio_v1.5/"

prompt = "a photo of an astronaut riding a horse on mars"

num_inference_steps = 20

scheduler = PNDMScheduler.from_pretrained(os.path.join(model_dir, "scheduler/scheduler_config.json"))

tokenizer = CLIPTokenizer.from_pretrained(model_dir, subfolder="tokenizer")

text_encoder = OnnxRuntimeModel(model=OnnxRuntimeModel.load_model(os.path.join(model_dir, "text_encoder/model.onnx")))

# in txt to image, vae_encoder is not necessary, only used in image to image generation
# vae_encoder = OnnxRuntimeModel(model=OnnxRuntimeModel.load_model(os.path.join(model_dir, "vae_encoder/model.onnx")))

vae_decoder = OnnxRuntimeModel(model=OnnxRuntimeModel.load_model(os.path.join(model_dir, "vae_decoder/model.onnx")))
unet = OnnxRuntimeModel(model=OnnxRuntimeModel.load_model(os.path.join(model_dir, "unet/model.onnx")))

pipe = OnnxStableDiffusionPipeline(
    vae_encoder=None,
    vae_decoder=vae_decoder,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    scheduler=scheduler,
    safety_checker=None,
    feature_extractor=None,
    requires_safety_checker=False,
)

image = pipe(prompt, num_inference_steps=num_inference_steps).images[0]

image.save(f"generated_image.png")

The following version, modified from pipeline_onnx_stable_diffusion, calls the ONNX models directly; it can also serve as a reference when porting the pipeline to other inference engines:

pipe_onnx_simple.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import torch
from transformers import CLIPTokenizer
from diffusers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, DPMSolverMultistepScheduler
from diffusers.configuration_utils import FrozenDict
from diffusers.utils import deprecate

from onnx_utils_simple import OnnxRuntimeModel, ORT_TO_NP_TYPE

import logging as logger
from tqdm.auto import tqdm
from PIL import Image


ort_device = "cpu" # gpu


class OnnxStableDiffusionPipeline():
    # vae_encoder: OnnxRuntimeModel
    vae_decoder: OnnxRuntimeModel
    text_encoder: OnnxRuntimeModel
    tokenizer: CLIPTokenizer
    unet: OnnxRuntimeModel
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]

    def __init__(self, model_dir):
        # scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],

        # stable-diffusion-v1-5 use PNDMScheduler by default
        self.scheduler = PNDMScheduler.from_pretrained(os.path.join(model_dir, "scheduler/scheduler_config.json"))

        # stable-diffusion-2-1 use DDIMScheduler by default
        # self.scheduler = DDIMScheduler.from_pretrained(os.path.join(model_dir, "scheduler/scheduler_config.json"))
        '''
        self.scheduler = DPMSolverMultistepScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            trained_betas=None,
            predict_epsilon=True,
            thresholding=False,
            algorithm_type="dpmsolver++",
            solver_type="midpoint",
            lower_order_final=True,
        )
        '''
        # self.scheduler = EulerAncestralDiscreteScheduler.from_config(
        # os.path.join(model_dir, "scheduler/scheduler_config.json"))

        # self.tokenizer = BertTokenizer.from_pretrained(os.path.join(model_dir, "./tokenizer"))

        self.tokenizer = CLIPTokenizer.from_pretrained(model_dir, subfolder="tokenizer")

        self.text_encoder = OnnxRuntimeModel(os.path.join(model_dir, "text_encoder/model.onnx"), device=ort_device)

        # in txt to image, vae_encoder is not necessary, only used in image to image generation
        # self.vae_encoder = OnnxRuntimeModel(os.path.join(model_dir, "vae_encoder/model.onnx"))

        self.vae_decoder = OnnxRuntimeModel(os.path.join(model_dir, "vae_decoder/model.onnx"), device=ort_device)
        self.unet = OnnxRuntimeModel(os.path.join(model_dir, "unet/model.onnx"), device=ort_device)

        self.safety_checker = None
        self.requires_safety_checker = False
        self.feature_extractor = None

        self.progress_bar = tqdm

        if hasattr(self.scheduler.config, "steps_offset") and self.scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {self.scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {self.scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(self.scheduler.config)
            new_config["steps_offset"] = 1
            self.scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(self.scheduler.config, "clip_sample") and self.scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {self.scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(self.scheduler.config)
            new_config["clip_sample"] = False
            self.scheduler._internal_dict = FrozenDict(new_config)

        if self.safety_checker is None and self.requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        # if self.safety_checker is not None and self.feature_extractor is None:
        #     raise ValueError(
        #         "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
        #         " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
        #     )

    def check_inputs(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int],
        width: Optional[int],
        callback_steps: int,
        negative_prompt: Optional[str] = None,
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        latents: Optional[np.ndarray] = None,
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`np.random.RandomState`, *optional*):
                A `np.random.RandomState` instance used to make generation deterministic.
            latents (`np.ndarray`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            `np.ndarray`: the generated images as an array of shape `(batch, height, width, 3)` with values in
            `[0, 1]`. Use `numpy_to_pil` to convert the array to PIL images.
        """

        # check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if generator is None:
            generator = np.random

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # get the initial random noise unless the user supplied it
        latents_dtype = prompt_embeds.dtype
        latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8)
        if latents is None:
            latents = generator.randn(*latents_shape).astype(latents_dtype)
        elif latents.shape != latents_shape:
            raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        latents = latents * np.float64(self.scheduler.init_noise_sigma)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        timestep_dtype = next(
            (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
            latent_model_input = latent_model_input.cpu().numpy()

            # predict the noise residual
            timestep = np.array([t], dtype=timestep_dtype)
            noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)
            noise_pred = noise_pred[0]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            scheduler_output = self.scheduler.step(
                torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
            )
            latents = scheduler_output.prev_sample.numpy()

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        # image = self.vae_decoder(latent_sample=latents)[0]
        # decode one latent at a time: the half-precision VAE decoder seems to give wrong results when batch size > 1
        image = np.concatenate(
            [self.vae_decoder(latent_sample=latents[i: i + 1])[0] for i in range(latents.shape[0])]
        )

        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))

        return image

    def _encode_prompt(
        self,
        prompt: Union[str, List[str]],
        num_images_per_prompt: Optional[int],
        do_classifier_free_guidance: bool,
        negative_prompt: Optional[str],
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # get prompt text embeddings
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids

            if not np.array_equal(text_input_ids, untruncated_ids):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]

        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="np",
            )
            negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]

        if do_classifier_free_guidance:
            negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @staticmethod
    def numpy_to_pil(images):
        """
        Convert a numpy image or a batch of images to a PIL image.
        """
        if images.ndim == 3:
            images = images[None, ...]
        images = (images * 255).round().astype("uint8")
        if images.shape[-1] == 1:
            # special case for grayscale (single channel) images
            pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
        else:
            pil_images = [Image.fromarray(image) for image in images]

        return pil_images


model_dir = "/mnt/f/deep_learning/onnx_model/stable_diffusio_v1.5/"

prompt = "a photo of an astronaut riding a horse on mars"

num_inference_steps = 20

onnx_pipe = OnnxStableDiffusionPipeline(model_dir)
image = onnx_pipe(prompt, num_inference_steps=num_inference_steps)

images = onnx_pipe.numpy_to_pil(image)

for i, image in enumerate(images):
    image.save(f"generated_image_{i}.png")

onnx_utils_simple.py

import logging as logger
import numpy as np
import os
import onnxruntime as ort


ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model_path, device="cpu"):
        self.model = None

        providers = ["CPUExecutionProvider"]
        if device == "gpu":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]

        if model_path:
            self.load_model(model_path, providers)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    def load_model(self, path: str, providers=None, sess_options=None):
        """
        Loads an ONNX Inference session with an ExecutionProvider. Default provider is `CPUExecutionProvider`

        Arguments:
            path (`str`):
                Path to the `.onnx` model file to load.
            providers (`List[str]`, *optional*):
                ONNX Runtime execution providers to use; defaults to `["CPUExecutionProvider"]`.
        """
        if providers is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            providers = ["CPUExecutionProvider"]  # "CUDAExecutionProvider",

        self.model = ort.InferenceSession(path, providers=providers, sess_options=sess_options)
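
The wrapper also accepts device="gpu", which simply puts CUDAExecutionProvider ahead of CPUExecutionProvider in the provider list (a minimal sketch; it assumes the onnxruntime-gpu build is installed and reuses the local model path from above):

import os
from onnx_utils_simple import OnnxRuntimeModel

model_dir = "/mnt/f/deep_learning/onnx_model/stable_diffusio_v1.5/"

# CPUExecutionProvider stays in the list as a fallback (see OnnxRuntimeModel.__init__)
unet = OnnxRuntimeModel(os.path.join(model_dir, "unet/model.onnx"), device="gpu")
print(unet.model.get_providers())  # shows which providers the session actually registered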

Input/output shapes when generating one 512x512 image:

text_encoder
input_ids (1, 77)
results shape: (1, 77, 768)
results shape: (1, 768)

unet
sample (2, 4, 64, 64)
timestep (1,)
encoder_hidden_states (2, 77, 768)
results shape: (2, 4, 64, 64)

vae_decoder
latent_sample (1, 4, 64, 64)
results shape: (1, 3, 512, 512)
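
These shapes can also be read straight from the exported graphs; a small sketch that enumerates the declared inputs and outputs of each model (dynamic axes show up as symbolic names rather than fixed numbers):

import os
import onnxruntime as ort

model_dir = "/mnt/f/deep_learning/onnx_model/stable_diffusio_v1.5/"

for name in ["text_encoder", "unet", "vae_decoder"]:
    sess = ort.InferenceSession(os.path.join(model_dir, name, "model.onnx"),
                                providers=["CPUExecutionProvider"])
    print(name)
    for inp in sess.get_inputs():
        print("  input ", inp.name, inp.shape, inp.type)
    for out in sess.get_outputs():
        print("  output", out.name, out.shape, out.type)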
