.Net接入AzureOpenAI、OpenAI、通义千问、智谱AI、讯飞星火、文心一言大语言模型。

这篇具有很好参考价值的文章主要介绍了.Net接入AzureOpenAI、OpenAI、通义千问、智谱AI、讯飞星火、文心一言大语言模型。希望对大家有所帮助。如果存在错误或未考虑完全的地方,请大家不吝赐教,您也可以点击"举报违法"按钮提交疑问。

前言

现在在网上搜索.NET接入大模型的帖子很少,有些官方案例只提供java和python的SDK,所以有了这篇.Net的接入大模型文章,目前仅实现对话模型的调用。
这里仅举例通义千问,其他模型实现可以参考GitHub。对您有帮助的话帮忙点个star。
个人博客:FaceMan' Blog 。
Github:FaceMan' GitHub 。文章来源地址https://www.toymoban.com/news/detail-813086.html

实现方式

1. 创建IModelExtensionsChatCompletionService对话服务,规范对话服务应实现的接口。

``` csharp
public interface IModelExtensionsChatCompletionService
{
	 /// <summary>
	 /// Chat completion: sends the full conversation history and returns a single assistant message.
	 /// </summary>
	 /// <param name="chatHistory">Conversation history (Semantic Kernel <c>ChatHistory</c>) to send to the model.</param>
	 /// <param name="settings">Optional execution settings (temperature, top_p, max tokens, ...); may be null.</param>
	 /// <param name="kernel">Semantic Kernel instance; may be null.</param>
	 /// <param name="cancellationToken">Token used to cancel the request.</param>
	 /// <returns>The assistant's reply as a single <c>ChatMessageContent</c>.</returns>
	 Task<ChatMessageContent> GetChatMessageContentsAsync(ChatHistory chatHistory, OpenAIPromptExecutionSettings settings = null, Kernel kernel = null, CancellationToken cancellationToken = default);

	 /// <summary>
	 /// Streaming chat completion: yields the assistant's reply incrementally as text chunks.
	 /// </summary>
	 /// <param name="chatHistory">Conversation history (Semantic Kernel <c>ChatHistory</c>) to send to the model.</param>
	 /// <param name="settings">Optional execution settings (temperature, top_p, max tokens, ...); may be null.</param>
	 /// <param name="kernel">Semantic Kernel instance; may be null.</param>
	 /// <param name="cancellationToken">Token used to cancel the stream.</param>
	 /// <returns>An async sequence of newly generated text fragments.</returns>
	 IAsyncEnumerable<string> GetStreamingChatMessageContentsAsync(ChatHistory chatHistory, OpenAIPromptExecutionSettings settings = null, Kernel kernel = null, CancellationToken cancellationToken = default);
 }
```

2. 创建ModelClient类做数据解析

```
public class ModelClient : IDisposable
{
    internal readonly HttpClient HttpClient = null!;

    // True when this instance created the HttpClient itself. An externally
    // supplied client is owned by the caller and must not be disposed here.
    private readonly bool _ownsHttpClient;

    /// <summary>
    /// Creates a client configured for the given vendor, attaching the
    /// appropriate authorization header where the vendor uses one.
    /// </summary>
    /// <param name="apiKey">Vendor API key (for ZhiPu: "id.secret" format).</param>
    /// <param name="modelType">Which vendor this client will talk to.</param>
    /// <param name="httpClient">Optional caller-owned HttpClient to reuse.</param>
    public ModelClient(string apiKey, ModelType modelType, HttpClient? httpClient = null)
    {
        _ownsHttpClient = httpClient is null;
        HttpClient = httpClient ?? new HttpClient();
        switch (modelType)
        {
            case ModelType.ZhiPu:
                // ZhiPu authenticates with a short-lived JWT derived from the raw API key.
                int expirationInSeconds = 3600; // token lifetime: 1 hour
                apiKey = GenerateJwtToken(apiKey, expirationInSeconds);
                HttpClient.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
                break;
            case ModelType.QianWen:
                HttpClient.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
                break;
            case ModelType.XunFei:
                // XunFei signs each request itself (see XunFeiClient), so no
                // default authorization header is attached here.
                break;
        }
        QianWen = new QianWenClient(this);
        ZhiPu = new ZhiPuClient(this);
        XunFei = new XunFeiClient(this);
        WenXin = new WenXinClient(this);
    }

    public QianWenClient QianWen { get; set; }
    public ZhiPuClient ZhiPu { get; set; }
    public XunFeiClient XunFei { get; set; }
    public WenXinClient WenXin { get; set; }

    /// <summary>
    /// Disposes the underlying HttpClient, but only when this instance created
    /// it. A client passed in by the caller is left untouched.
    /// </summary>
    public void Dispose()
    {
        if (_ownsHttpClient)
        {
            HttpClient.Dispose();
        }
    }

    /// <summary>
    /// Deserializes a JSON HTTP response body into <typeparamref name="T"/>.
    /// </summary>
    /// <typeparam name="T">Target response wrapper type.</typeparam>
    /// <param name="response">Response to read.</param>
    /// <param name="cancellationToken">Cancellation token for the read.</param>
    /// <returns>The deserialized body.</returns>
    /// <exception cref="Exception">
    /// Thrown with the raw body text when the status code is not successful or
    /// the body cannot be deserialized into <typeparamref name="T"/>.
    /// </exception>
    internal static async Task<T> ReadResponse<T>(HttpResponseMessage response, CancellationToken cancellationToken)
    {
        if (!response.IsSuccessStatusCode)
        {
            throw new Exception(await response.Content.ReadAsStringAsync());
        }
        try
        {
            return (await response.Content.ReadFromJsonAsync<T>(options: null, cancellationToken))!;
        }
        catch (Exception e) when (e is NotSupportedException or System.Text.Json.JsonException)
        {
            throw new Exception($"未能将以下json转换为: {typeof(T).Name}: {await response.Content.ReadAsStringAsync()}", e);
        }
    }

    /// <summary>
    /// XunFei (Spark) message parser: deserializes one received websocket
    /// message into the XunFei response wrapper.
    /// </summary>
    /// <typeparam name="T">Unused; kept for signature compatibility with existing callers.</typeparam>
    /// <param name="receivedMessage">Raw JSON message text.</param>
    /// <returns>The parsed response wrapper.</returns>
    public static XunFeiResponseWrapper ReadResponse<T>(string receivedMessage)
    {
        XunFeiResponseWrapper response = JsonConvert.DeserializeObject<XunFeiResponseWrapper>(receivedMessage);
        return response;
    }

    /// <summary>
    /// Generates the ZhiPu JWT from an API key of the form "id.secret":
    /// HMAC-SHA256 signed, with api_key/exp/timestamp claims and a
    /// sign_type=SIGN header as required by the ZhiPu API.
    /// </summary>
    /// <param name="apiKey">API key in "id.secret" format.</param>
    /// <param name="expSeconds">Token lifetime in seconds.</param>
    /// <returns>The serialized JWT.</returns>
    /// <exception cref="ArgumentException">The key is not in "id.secret" format.</exception>
    internal string GenerateJwtToken(string apiKey, int expSeconds)
    {
        // Split the API key into its ID and secret halves.
        var parts = apiKey.Split('.');
        if (parts.Length != 2)
        {
            throw new ArgumentException("Invalid API key format.");
        }

        var id = parts[0];
        var secret = parts[1];

        // Header: HMAC-SHA256 signature plus ZhiPu's required sign_type field.
        var header = new JwtHeader(new SigningCredentials(
            new SymmetricSecurityKey(Encoding.UTF8.GetBytes(secret)), SecurityAlgorithms.HmacSha256))
            {
                {"sign_type", "SIGN"}
            };

        // Payload: claims use millisecond timestamps per the ZhiPu spec.
        long currentMillis = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        var payload = new JwtPayload
        {
            {"api_key", id},
            {"exp", currentMillis + expSeconds * 1000},
            {"timestamp", currentMillis}
        };

        // Assemble and serialize the token.
        var token = new JwtSecurityToken(header, payload);

        return new JwtSecurityTokenHandler().WriteToken(token);
    }
}
```

3. 定义ModelType区分不同模型供应商

```
public enum ModelType
{
	/// <summary>Alibaba Tongyi QianWen.</summary>
	[Description("通义千问")]
	[EnumName("通义千问")]
	QianWen = 1,

	/// <summary>ZhiPu AI (GLM).</summary>
	[Description("智谱AI")]
	[EnumName("智谱AI")]
	ZhiPu,

	/// <summary>iFlytek Spark (XunFei).</summary>
	[Description("科大讯飞")]
	[EnumName("科大讯飞")]
	XunFei,

	/// <summary>Baidu ERNIE Bot (WenXin YiYan).</summary>
	[Description("文心一言")]
	[EnumName("文心一言")]
	WenXin,
}
```

4. 以通义千问为例,创建QianWenChatCompletionService类实现IModelExtensionsChatCompletionService接口

```
public class QianWenChatCompletionService : IModelExtensionsChatCompletionService
{
	private readonly string _apiKey;
	private readonly string _model;

	/// <summary>
	/// Creates a QianWen-backed chat completion service.
	/// </summary>
	/// <param name="key">DashScope API key.</param>
	/// <param name="model">Model name, e.g. "qwen-turbo".</param>
	public QianWenChatCompletionService(string key, string model)
	{
		_apiKey = key;
		_model = model;
	}

	/// <summary>
	/// Converts the SK chat history into QianWen request messages.
	/// </summary>
	private static List<ChatMessage> BuildHistory(ChatHistory chatHistory)
	{
		var historyList = new List<ChatMessage>();
		foreach (var item in chatHistory)
		{
			historyList.Add(new ChatMessage()
			{
				Role = item.Role.Label,
				Content = item.Content,
			});
		}
		return historyList;
	}

	/// <summary>
	/// Maps SK execution settings onto QianWen parameters.
	/// Returns null when no settings were supplied.
	/// </summary>
	private static ChatParameters BuildParameters(OpenAIPromptExecutionSettings settings)
	{
		if (settings == null)
		{
			return null;
		}
		return new ChatParameters()
		{
			TopP = (float)settings.TopP,
			MaxTokens = settings.MaxTokens,
			Temperature = (float)settings.Temperature,
			Seed = settings.Seed != null ? (ulong)settings.Seed : default,
			Stop = settings.StopSequences,
		};
	}

	/// <summary>
	/// Chat completion: sends the whole history and returns the assistant's reply.
	/// </summary>
	/// <param name="chatHistory">Conversation history.</param>
	/// <param name="settings">Optional execution settings; may be null.</param>
	/// <param name="kernel">Semantic Kernel instance; unused here, may be null.</param>
	/// <param name="cancellationToken">Token used to cancel the request.</param>
	/// <returns>The assistant's full reply.</returns>
	public async Task<ChatMessageContent> GetChatMessageContentsAsync(ChatHistory chatHistory, OpenAIPromptExecutionSettings settings = null, Kernel kernel = null, CancellationToken cancellationToken = default)
	{
		// Dispose the per-call client so its underlying HttpClient is not leaked.
		using ModelClient client = new(_apiKey, ModelType.QianWen);
		QianWenResponseWrapper result = await client.QianWen.GetChatMessageContentsAsync(_model, BuildHistory(chatHistory), BuildParameters(settings), cancellationToken);
		return new ChatMessageContent(AuthorRole.Assistant, result.Output.Text);
	}

	/// <summary>
	/// Streaming chat completion: yields the reply incrementally as text chunks.
	/// </summary>
	/// <param name="chatHistory">Conversation history.</param>
	/// <param name="settings">Optional execution settings; may be null.</param>
	/// <param name="kernel">Semantic Kernel instance; unused here, may be null.</param>
	/// <param name="cancellationToken">Token used to cancel the stream.</param>
	/// <returns>An async sequence of newly generated text fragments.</returns>
	public async IAsyncEnumerable<string> GetStreamingChatMessageContentsAsync(ChatHistory chatHistory, OpenAIPromptExecutionSettings settings = null, Kernel kernel = null, CancellationToken cancellationToken = default)
	{
		// The 'using' declaration in an async iterator disposes the client when
		// the enumeration completes or the enumerator is disposed.
		using ModelClient client = new(_apiKey, ModelType.QianWen);
		await foreach (string item in client.QianWen.GetStreamingChatMessageContentsAsync(_model, BuildHistory(chatHistory), BuildParameters(settings), cancellationToken))
		{
			yield return item;
		}
	}
}
```
其中,OpenAIPromptExecutionSettings 和 ChatHistory 来自于SK框架,ChatParameters属于自定义的参数类,因为每家模型供应商都不一样。

5. ChatParameters的代码

```
public record ChatParameters
{
	/// <summary>
	/// Result format: "text" for the legacy text style, "message" for
	/// OpenAI-compatible messages.
	/// <para>NOTE(review): the original doc suggests language models require
	/// "text" while VL models use "message" — confirm against the vendor docs.</para>
	/// </summary>
	[JsonPropertyName("result_format")]
	public string? ResultFormat { get; set; }

	/// <summary>
	/// Seed for the random number generator, controlling generation randomness.
	/// Using the same seed allows reproducible model output.
	/// <para>Optional. Default is 1234.</para>
	/// </summary>
	[JsonPropertyName("seed")]
	public ulong? Seed { get; set; }

	/// <summary>
	/// Upper limit on the number of tokens to generate. The limit is a maximum,
	/// not a guarantee that exactly that many tokens are produced. Optional.
	/// <para>Maximum and default are 1500 for qwen-turbo and qwen-max-longcontext.</para>
	/// <para>Maximum and default are 2048 for qwen-max, qwen-max-1201 and qwen-plus.</para>
	/// </summary>
	[JsonPropertyName("max_tokens")]
	public int? MaxTokens { get; set; }

	/// <summary>
	/// Nucleus-sampling probability threshold. With a value of 0.8, for example,
	/// only tokens whose cumulative probability reaches at least 0.8 are kept.
	/// <para>Range is (0, 1.0). Larger values increase randomness;</para>
	/// <para>smaller values decrease it. Optional.</para>
	/// <para>Default is 0.8. The value must be strictly less than 1.</para>
	/// </summary>
	[JsonPropertyName("top_p")]
	public float? TopP { get; set; }

	/// <summary>
	/// Size of the candidate set to sample from. When set to 50, for example,
	/// only the top 50 tokens are considered. Optional. Larger values increase
	/// randomness; smaller values increase determinism. Note: when top_k is
	/// null or greater than 100 the top-k strategy is disabled and only top_p
	/// applies. Default is null.
	/// </summary>
	[JsonPropertyName("top_k")]
	public int? TopK { get; set; }

	/// <summary>
	/// Repetition penalty applied to reduce redundancy in generation.
	/// A value of 1.0 means no penalty. Optional.
	/// <para>Default is 1.1.</para>
	/// </summary>
	[JsonPropertyName("repetition_penalty")]
	public float? RepetitionPenalty { get; set; }

	/// <summary>
	/// Controls the randomness and diversity of generated text.
	/// Higher temperatures flatten the probability distribution, allowing more
	/// low-probability words to be chosen and producing more varied output.
	/// <para>
	/// Lower temperatures sharpen the distribution, making high-probability
	/// words more likely and the output more deterministic. Optional.
	/// Range is [0, 2). System default is 1.0.
	/// </para>
	/// </summary>
	[JsonPropertyName("temperature")]
	public float? Temperature { get; set; }

	/// <summary>
	/// Content at which the model should stop generating further output.
	/// <para>May be a string, a list of strings, or a list of token IDs.</para>
	/// <para>For example, setting stop to "hello" stops generation right before
	/// "hello" would be produced; setting it to [37763, 367] stops before the
	/// token IDs corresponding to "Observation" are produced.</para>
	/// <para>
	/// Optional. In list mode, strings and token IDs must not be mixed.
	/// </para>
	/// </summary>
	[JsonPropertyName("stop")]
	public object? Stop { get; set; }

	/// <summary>
	/// Controls whether web search results are considered during generation.
	/// <para>Note: enabling search does not guarantee the results are used.</para>
	/// <para>
	/// When enabled, the model treats search results as part of the prompt and
	/// may generate text that incorporates them.
	/// </para>
	/// <para>Optional; defaults to false.</para>
	/// </summary>
	[JsonPropertyName("enable_search")]
	public bool? EnableSearch { get; set; }

	/// <summary>
	/// Controls whether incremental output mode is enabled.
	/// <para>
	/// Defaults to false, meaning each streamed event contains the full text
	/// generated so far. When true, incremental mode is active and each event
	/// contains only the new fragment, leaving the caller to assemble the full
	/// output.
	/// </para>
	/// Optional; only meaningful in streaming mode.
	/// </summary>
	[JsonPropertyName("incremental_output")]
	public bool? IncrementalOutput { get; set; }

	/// <summary>
	/// For synchronous calls this should be false or omitted: the model returns
	/// everything at once after generation finishes. When true, the model
	/// streams content chunk by chunk over a standard Event Stream, ending with
	/// a "data: [DONE]" message.
	/// </summary>
	[JsonPropertyName("stream")]
	public bool Stream { get; set; }

	/// <summary>
	/// ZhiPu: when do_sample is true the sampling strategy is enabled; when
	/// false, temperature and top_p have no effect.
	/// </summary>
	[JsonPropertyName("do_sample")]
	public bool DoSample { get; set; }

	/// <summary>
	/// WenXin: penalizes already-generated tokens to reduce repetition.
	/// Notes: (1) larger values mean a stronger penalty;
	/// (2) default 1.0, valid range [1.0, 2.0].
	/// </summary>
	[JsonPropertyName("penalty_score")]
	public float? PenaltyScore { get; set; }

	/// <summary>
	/// WenXin: system persona, e.g. "You are an AI assistant built by company X".
	/// Notes: (1) limited to 1024 characters;
	/// (2) not supported together with the functions parameter.
	/// </summary>
	[JsonPropertyName("system")]
	public string System { get; set; }

	/// <summary>
	/// WenXin: force-disable the live search feature. Default false (search on).
	/// </summary>
	[JsonPropertyName("disable_search")]
	public bool DisableSearch { get; set; }

	/// <summary>
	/// WenXin: whether to return superscript citation markers.
	/// </summary>
	[JsonPropertyName("enable_citation")]
	public bool EnableCitation { get; set; }

	/// <summary>
	/// WenXin: authentication token. Not serialized via a JSON property name
	/// here; used for request authorization.
	/// </summary>
	public string Token { get; set; }
}
```

6. 创建QianWenClient HTTP请求类

```
 public class QianWenClient
 {
	 /// <summary>
	 /// Base endpoint for QianWen (DashScope) text generation.
	 /// </summary>
	 private readonly string baseUrl = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation";

	 internal QianWenClient(ModelClient parent)
	 {
		 Parent = parent;
	 }

	 /// <summary>Owning client that supplies the configured HttpClient.</summary>
	 internal ModelClient Parent { get; }

	 /// <summary>
	 /// Builds the JSON request body shared by the streaming and non-streaming calls.
	 /// Null-valued parameters are omitted from the payload.
	 /// </summary>
	 private JsonContent BuildContent(string model, IReadOnlyList<ChatMessage> messages, ChatParameters? parameters)
	 {
		 return JsonContent.Create(QianWenRequestWrapper.Create(model, new
		 {
			 messages,
		 }, parameters), options: new JsonSerializerOptions
		 {
			 DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
		 });
	 }

	 /// <summary>
	 /// Sends a non-streaming chat completion request.
	 /// </summary>
	 /// <param name="model">Model name, e.g. "qwen-turbo".</param>
	 /// <param name="messages">Conversation messages.</param>
	 /// <param name="parameters">Optional generation parameters.</param>
	 /// <param name="cancellationToken">Token used to cancel the request.</param>
	 /// <returns>The parsed response wrapper.</returns>
	 public async Task<QianWenResponseWrapper> GetChatMessageContentsAsync(string model, IReadOnlyList<ChatMessage> messages, ChatParameters? parameters = null, CancellationToken cancellationToken = default)
	 {
		 using HttpRequestMessage httpRequest = new(HttpMethod.Post, baseUrl)
		 {
			 Content = BuildContent(model, messages, parameters),
		 };
		 // Dispose request and response once the body has been fully read.
		 using HttpResponseMessage resp = await Parent.HttpClient.SendAsync(httpRequest, cancellationToken);
		 return await ModelClient.ReadResponse<QianWenResponseWrapper>(resp, cancellationToken);
	 }

	 /// <summary>
	 /// Sends a streaming (server-sent events) chat completion request and
	 /// yields only the newly generated text of each event. By default the
	 /// service returns cumulative text per event, so the previously seen
	 /// prefix is stripped before yielding.
	 /// </summary>
	 /// <param name="model">Model name, e.g. "qwen-turbo".</param>
	 /// <param name="messages">Conversation messages.</param>
	 /// <param name="parameters">Optional generation parameters.</param>
	 /// <param name="cancellationToken">Token used to cancel the stream.</param>
	 /// <returns>An async sequence of text fragments.</returns>
	 public async IAsyncEnumerable<string> GetStreamingChatMessageContentsAsync(string model,
	 IReadOnlyList<ChatMessage> messages,
	 ChatParameters? parameters = null,
	 [EnumeratorCancellation] CancellationToken cancellationToken = default)
	 {
		 using HttpRequestMessage httpRequest = new(HttpMethod.Post, baseUrl)
		 {
			 Content = BuildContent(model, messages, parameters),
		 };
		 httpRequest.Headers.Accept.Add(new MediaTypeWithQualityHeaderValue("text/event-stream"));
		 // DashScope enables SSE streaming via this vendor-specific header.
		 httpRequest.Headers.TryAddWithoutValidation("X-DashScope-SSE", "enable");

		 using HttpResponseMessage resp = await Parent.HttpClient.SendAsync(httpRequest, HttpCompletionOption.ResponseHeadersRead, cancellationToken);
		 if (!resp.IsSuccessStatusCode)
		 {
			 throw new Exception(await resp.Content.ReadAsStringAsync());
		 }

		 string lastText = string.Empty; // cumulative text seen so far
		 using StreamReader reader = new(await resp.Content.ReadAsStreamAsync(), Encoding.UTF8);
		 while (!reader.EndOfStream)
		 {
			 if (cancellationToken.IsCancellationRequested) throw new TaskCanceledException();

			 string? line = await reader.ReadLineAsync();
			 if (line != null && line.StartsWith("data:"))
			 {
				 string data = line["data:".Length..];
				 if (data.StartsWith("{\"code\":"))
				 {
					 // DashScope reports errors as a JSON object with a "code" field.
					 throw new Exception(data);
				 }
				 var result = JsonSerializer.Deserialize<QianWenResponseWrapper>(data)!;
				 string newText = result.Output.Text;
				 // Cumulative mode: yield only the suffix beyond what was already
				 // seen. If a chunk is shorter than the accumulated text (e.g.
				 // incremental_output mode), treat the whole chunk as the delta
				 // instead of letting Substring throw.
				 string addedText = newText.Length >= lastText.Length
					 ? newText[lastText.Length..]
					 : newText;

				 lastText = newText;

				 yield return addedText;
			 }
		 }
	 }
 }
```

7. 创建QianWenResponseWrapper基本响应类

/// <summary>
/// Common response wrapper returned by QianWen (DashScope) generation requests.
/// </summary>
public record QianWenResponseWrapper
{
    /// <summary>
    /// Identifier assigned to each individual request.
    /// </summary>
    [JsonPropertyName("request_id")]
    public string RequestId { get; init; }

    /// <summary>
    /// Generation output associated with the request.
    /// </summary>
    [JsonPropertyName("output")]
    public QianWenChatOutput Output { get; init; }

    /// <summary>
    /// Token usage of the request; may be absent.
    /// </summary>
    [JsonPropertyName("usage")]
    public QianWenChatTokenUsage? Usage { get; init; }
}
/// <summary>
/// Token usage reported for a chat request.
/// </summary>
public record QianWenChatTokenUsage
{
    /// <summary>
    /// Token count of the output message.
    /// </summary>
    [JsonPropertyName("output_tokens")]
    public int OutputTokens { get; init; }

    /// <summary>
    /// Token count of the input messages.
    /// </summary>
    [JsonPropertyName("input_tokens")]
    public int InputTokens { get; init; }
}
/// <summary>
/// Output section of a chat response.
/// </summary>
public record QianWenChatOutput
{
    /// <summary>
    /// Text content produced by the model.
    /// </summary>
    [JsonPropertyName("text")]
    public string Text { get; init; }

    /// <summary>
    /// Three possible values:
    /// <list type="bullet">
    /// <item><c>null</c> — still generating</item>
    /// <item><c>stop</c> — generation stopped normally</item>
    /// <item><c>length</c> — output hit the length limit</item>
    /// </list>
    /// </summary>
    [JsonPropertyName("finish_reason")]
    public string FinishReason { get; init; }
}

8. 创建QianWenRequestWrapper请求包装器

/// <summary>
/// Factory for QianWen request payloads (model + input + optional parameters).
/// </summary>
public record QianWenRequestWrapper
{
	/// <summary>
	/// Builds a request wrapper that carries explicit generation parameters.
	/// </summary>
	/// <exception cref="ArgumentNullException">model or input is null.</exception>
	public static QianWenRequestWrapper<TInput, TParameters> Create<TInput, TParameters>(string model, TInput input, TParameters? parameters = default)
	{
		if (model is null)
		{
			throw new ArgumentNullException(nameof(model));
		}
		if (input is null)
		{
			throw new ArgumentNullException(nameof(input));
		}
		return new QianWenRequestWrapper<TInput, TParameters>
		{
			Model = model,
			Input = input,
			Parameters = parameters,
		};
	}

	/// <summary>
	/// Builds a request wrapper without generation parameters.
	/// </summary>
	/// <exception cref="ArgumentNullException">model or inputPrompt is null.</exception>
	public static QianWenRequestWrapper<TInput, object> Create<TInput>(string model, TInput inputPrompt)
	{
		if (model is null)
		{
			throw new ArgumentNullException(nameof(model));
		}
		if (inputPrompt is null)
		{
			throw new ArgumentNullException(nameof(inputPrompt));
		}
		return new QianWenRequestWrapper<TInput, object>
		{
			Model = model,
			Input = inputPrompt,
		};
	}
}
public record QianWenRequestWrapper<TInput, TParameters> : QianWenRequestWrapper
{
		/// <summary>Model name, e.g. "qwen-turbo".</summary>
		[JsonPropertyName("model")]
		public string Model { get; set; }

		/// <summary>Request input (e.g. an object carrying the messages list).</summary>
		[JsonPropertyName("input")]
		public TInput Input { get; init; }

		/// <summary>Optional generation parameters; null when omitted.</summary>
		[JsonPropertyName("parameters")]
		public TParameters? Parameters { get; init; }
}

9. 调用

// Create the QianWen chat service with your API key and model name.
QianWenChatCompletionService chatgpt = new("你的key", "模型名称:例如qwen-turbo");
ChatHistory historys = new ChatHistory();
historys.AddSystemMessage("你是一个c#编程高手,你将用代码回答我关于.net编程的技术问题,下面是我的第一个问题:");
historys.AddUserMessage("用c#写一个冒泡排序");
// Streaming call: prints chunks as they arrive.
await foreach (string item in chatgpt.GetStreamingChatMessageContentsAsync(historys))
{
    Console.Write(item);
}
// Non-streaming call: returns the whole completion at once.
var result = await chatgpt.GetChatMessageContentsAsync(historys);
Console.WriteLine(result);

到了这里,关于.Net接入AzureOpenAI、OpenAI、通义千问、智谱AI、讯飞星火、文心一言大语言模型。的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处: 如若内容造成侵权/违法违规/事实不符,请点击违法举报进行投诉反馈,一经查实,立即删除!

领支付宝红包 赞助服务器费用

相关文章

觉得文章有用就打赏一下文章作者

支付宝扫一扫打赏

博客赞助

微信扫一扫打赏

请作者喝杯咖啡吧~博客赞助

支付宝扫一扫领取红包,优惠每天领

二维码1

领取红包

二维码2

领红包