Aatif G.
OpenAI vs Claude: NLP Implementation Guide in .NET 8

Introduction

This guide shows you how to implement Natural Language Processing using OpenAI GPT and Anthropic Claude in a .NET 8 application. Both providers sit behind a single interface, with support for streaming responses and function calling, so you can swap one for the other without touching calling code.

Interface Definition

public interface INLPProvider
{
    Task<string> GenerateResponseAsync(string userMessage, List<ChatMessage>? history = null);
    IAsyncEnumerable<string> StreamResponseAsync(string userMessage, List<ChatMessage>? history = null);
    Task<FunctionCallResult> GenerateWithFunctionsAsync(string userMessage, List<Function> functions);
}

public record ChatMessage(string Role, string Content);

public record Function(string Name, string Description, object Parameters);

public record FunctionCallResult(string Response, string? FunctionName, string? FunctionArguments);
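
Every caller works against these neutral types, so swapping providers never touches calling code. A quick usage sketch (nlpProvider here is whichever INLPProvider implementation you registered):

// Hypothetical caller — history is just a list of plain records
var history = new List<ChatMessage>
{
    new("user", "Do you have any rooms available this weekend?"),
    new("assistant", "Yes, we have standard and deluxe rooms available.")
};

var reply = await nlpProvider.GenerateResponseAsync("How much is the deluxe?", history);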

OpenAI GPT Implementation

Setup

dotnet add package OpenAI

Configuration

// appsettings.json
{
  "OpenAI": {
    "ApiKey": "your-api-key",
    "Model": "gpt-4-turbo-preview",
    "MaxTokens": 1000
  }
}
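
One note on that ApiKey value: don't commit real keys to source control. For local development, the .NET user-secrets store keeps them out of appsettings.json:

dotnet user-secrets init
dotnet user-secrets set "OpenAI:ApiKey" "your-api-key"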

Implementation

using OpenAI;
using OpenAI.Chat;

public class OpenAINLPProvider : INLPProvider
{
    private readonly ChatClient _client;
    private readonly string _model;
    private readonly int _maxTokens;
    private readonly ILogger<OpenAINLPProvider> _logger;

    public OpenAINLPProvider(
        IConfiguration configuration,
        ILogger<OpenAINLPProvider> logger)
    {
        var apiKey = configuration["OpenAI:ApiKey"];
        _model = configuration["OpenAI:Model"] ?? "gpt-4-turbo-preview";
        _maxTokens = int.Parse(configuration["OpenAI:MaxTokens"] ?? "1000");
        _client = new ChatClient(_model, apiKey);
        _logger = logger;
    }

    public async Task<string> GenerateResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        var messages = BuildMessages(userMessage, history);

        var options = new ChatCompletionOptions
        {
            MaxTokens = _maxTokens,
            Temperature = 0.7f
        };

        var completion = await _client.CompleteChatAsync(messages, options);
        var response = completion.Value.Content[0].Text;

        _logger.LogInformation("Generated response: {Response}", response);
        return response;
    }

    public async IAsyncEnumerable<string> StreamResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        var messages = BuildMessages(userMessage, history);

        var options = new ChatCompletionOptions
        {
            MaxTokens = _maxTokens,
            Temperature = 0.7f
        };

        await foreach (var update in _client.CompleteChatStreamingAsync(messages, options))
        {
            foreach (var contentPart in update.ContentUpdate)
            {
                if (!string.IsNullOrEmpty(contentPart.Text))
                {
                    yield return contentPart.Text;
                }
            }
        }
    }

    public async Task<FunctionCallResult> GenerateWithFunctionsAsync(
        string userMessage,
        List<Function> functions)
    {
        var messages = new List<OpenAI.Chat.ChatMessage>
        {
            new SystemChatMessage("You are a helpful assistant with access to functions."),
            new UserChatMessage(userMessage)
        };

        var tools = functions.Select(f => ChatTool.CreateFunctionTool(
            f.Name,
            f.Description,
            BinaryData.FromObjectAsJson(f.Parameters)
        )).ToList();

        var options = new ChatCompletionOptions();
        foreach (var tool in tools)
        {
            options.Tools.Add(tool);
        }

        var completion = await _client.CompleteChatAsync(messages, options);

        if (completion.Value.FinishReason == ChatFinishReason.ToolCalls)
        {
            var toolCall = completion.Value.ToolCalls[0];
            return new FunctionCallResult(
                // Content can be empty when the model opts to call a tool,
                // so avoid indexing into it directly
                Response: completion.Value.Content.FirstOrDefault()?.Text ?? string.Empty,
                FunctionName: toolCall.FunctionName,
                FunctionArguments: toolCall.FunctionArguments.ToString()
            );
        }

        return new FunctionCallResult(
            Response: completion.Value.Content[0].Text,
            FunctionName: null,
            FunctionArguments: null
        );
    }

    private List<OpenAI.Chat.ChatMessage> BuildMessages(
        string userMessage,
        List<ChatMessage>? history)
    {
        var messages = new List<OpenAI.Chat.ChatMessage>
        {
            new SystemChatMessage("You are a helpful voice assistant.")
        };

        if (history != null)
        {
            foreach (var msg in history)
            {
                messages.Add(msg.Role.ToLower() switch
                {
                    "user" => new UserChatMessage(msg.Content),
                    "assistant" => new AssistantChatMessage(msg.Content),
                    _ => new SystemChatMessage(msg.Content)
                });
            }
        }

        messages.Add(new UserChatMessage(userMessage));
        return messages;
    }
}

Anthropic Claude Implementation

Setup

Anthropic does not publish an official .NET SDK, so this section uses the community-maintained Anthropic.SDK package:

dotnet add package Anthropic.SDK

Configuration

// appsettings.json
{
  "Anthropic": {
    "ApiKey": "your-api-key",
    "Model": "claude-3-5-sonnet-20241022",
    "MaxTokens": 1000
  }
}

Implementation

using Anthropic.SDK;
using Anthropic.SDK.Messaging;

public class ClaudeNLPProvider : INLPProvider
{
    private readonly AnthropicClient _client;
    private readonly string _model;
    private readonly int _maxTokens;
    private readonly ILogger<ClaudeNLPProvider> _logger;

    public ClaudeNLPProvider(
        IConfiguration configuration,
        ILogger<ClaudeNLPProvider> logger)
    {
        var apiKey = configuration["Anthropic:ApiKey"];
        _model = configuration["Anthropic:Model"] ?? "claude-3-5-sonnet-20241022";
        _maxTokens = int.Parse(configuration["Anthropic:MaxTokens"] ?? "1000");
        _client = new AnthropicClient(apiKey);
        _logger = logger;
    }

    public async Task<string> GenerateResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        var messages = BuildMessages(userMessage, history);

        var request = new MessageRequest
        {
            Model = _model,
            MaxTokens = _maxTokens,
            Messages = messages,
            System = "You are a helpful voice assistant."
        };

        var response = await _client.Messages.CreateAsync(request);
        var content = response.Content[0].Text;

        _logger.LogInformation("Generated response: {Response}", content);
        return content;
    }

    public async IAsyncEnumerable<string> StreamResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        var messages = BuildMessages(userMessage, history);

        var request = new MessageRequest
        {
            Model = _model,
            MaxTokens = _maxTokens,
            Messages = messages,
            System = "You are a helpful voice assistant.",
            Stream = true
        };

        await foreach (var chunk in _client.Messages.StreamAsync(request))
        {
            if (chunk.Type == "content_block_delta" && chunk.Delta?.Text != null)
            {
                yield return chunk.Delta.Text;
            }
        }
    }

    public async Task<FunctionCallResult> GenerateWithFunctionsAsync(
        string userMessage,
        List<Function> functions)
    {
        var messages = BuildMessages(userMessage, null);

        var tools = functions.Select(f => new Tool
        {
            Name = f.Name,
            Description = f.Description,
            InputSchema = f.Parameters
        }).ToList();

        var request = new MessageRequest
        {
            Model = _model,
            MaxTokens = _maxTokens,
            Messages = messages,
            System = "You are a helpful assistant with access to tools.",
            Tools = tools
        };

        var response = await _client.Messages.CreateAsync(request);

        if (response.StopReason == "tool_use" && response.Content.Any(c => c.Type == "tool_use"))
        {
            var toolUse = response.Content.First(c => c.Type == "tool_use");
            return new FunctionCallResult(
                Response: response.Content.FirstOrDefault(c => c.Type == "text")?.Text ?? string.Empty,
                FunctionName: toolUse.Name,
                FunctionArguments: toolUse.Input?.ToString()
            );
        }

        return new FunctionCallResult(
            // Guard against non-text first blocks instead of indexing blindly
            Response: response.Content.FirstOrDefault(c => c.Type == "text")?.Text ?? string.Empty,
            FunctionName: null,
            FunctionArguments: null
        );
    }

    private List<Message> BuildMessages(string userMessage, List<ChatMessage>? history)
    {
        var messages = new List<Message>();

        if (history != null)
        {
            foreach (var msg in history)
            {
                messages.Add(new Message
                {
                    Role = msg.Role.ToLower() == "user" ? "user" : "assistant",
                    Content = msg.Content
                });
            }
        }

        messages.Add(new Message { Role = "user", Content = userMessage });
        return messages;
    }
}

Dependency Injection Setup

// Program.cs
builder.Services.AddSingleton<INLPProvider, OpenAINLPProvider>();
// OR
builder.Services.AddSingleton<INLPProvider, ClaudeNLPProvider>();
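
If you want both providers available side by side, .NET 8's keyed services make the choice explicit at each injection site. A minimal sketch — the "openai" and "claude" key names are arbitrary:

// Program.cs — register both, then pick one per consumer
builder.Services.AddKeyedSingleton<INLPProvider, OpenAINLPProvider>("openai");
builder.Services.AddKeyedSingleton<INLPProvider, ClaudeNLPProvider>("claude");

// A consumer selects a provider by key
public class VoiceAssistantService
{
    private readonly INLPProvider _nlp;

    public VoiceAssistantService([FromKeyedServices("claude")] INLPProvider nlp)
        => _nlp = nlp;
}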

Usage Examples

Basic Conversation

[ApiController]
[Route("api/[controller]")]
public class ConversationController : ControllerBase
{
    private readonly INLPProvider _nlp;

    public ConversationController(INLPProvider nlp) => _nlp = nlp;

    [HttpPost("chat")]
    public async Task<IActionResult> Chat([FromBody] ChatRequest request)
    {
        var response = await _nlp.GenerateResponseAsync(request.Message, request.History);
        return Ok(new { response });
    }
}

public record ChatRequest(string Message, List<ChatMessage>? History);

Streaming Response

[HttpPost("stream")]
public async Task StreamChat([FromBody] ChatRequest request)
{
    Response.ContentType = "text/event-stream";

    await foreach (var token in _nlp.StreamResponseAsync(request.Message, request.History))
    {
        await Response.WriteAsync($"data: {token}\n\n");
        await Response.Body.FlushAsync();
    }
}
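
One caveat: if a token can contain a newline, escape it before writing, or the data: framing breaks. On the consuming side, plain HttpClient is enough to read the stream. A sketch, assuming the endpoint above is hosted at /api/conversation/stream (adjust the URL and payload to your setup):

using System.Net.Http.Json; // for JsonContent

// Hypothetical console client for the streaming endpoint
using var client = new HttpClient { BaseAddress = new Uri("https://localhost:5001") };
using var request = new HttpRequestMessage(HttpMethod.Post, "/api/conversation/stream")
{
    Content = JsonContent.Create(new { message = "Hello", history = (object?)null })
};

// ResponseHeadersRead lets us process tokens as they arrive instead of buffering
using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead);
await using var stream = await response.Content.ReadAsStreamAsync();
using var reader = new StreamReader(stream);

while (!reader.EndOfStream)
{
    var line = await reader.ReadLineAsync();
    if (line != null && line.StartsWith("data: "))
    {
        Console.Write(line["data: ".Length..]);
    }
}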

Function Calling Example

[HttpPost("book-room")]
public async Task<IActionResult> BookRoom([FromBody] BookingRequest request)
{
    var functions = new List<Function>
    {
        new Function(
            Name: "check_availability",
            Description: "Check room availability for given dates",
            Parameters: new
            {
                type = "object",
                properties = new
                {
                    check_in = new { type = "string", description = "Check-in date (YYYY-MM-DD)" },
                    check_out = new { type = "string", description = "Check-out date (YYYY-MM-DD)" },
                    room_type = new { type = "string", description = "Type of room requested" }
                },
                required = new[] { "check_in", "check_out" }
            }
        ),
        new Function(
            Name: "create_booking",
            Description: "Create a new room booking",
            Parameters: new
            {
                type = "object",
                properties = new
                {
                    guest_name = new { type = "string" },
                    check_in = new { type = "string" },
                    check_out = new { type = "string" },
                    room_type = new { type = "string" }
                },
                required = new[] { "guest_name", "check_in", "check_out", "room_type" }
            }
        )
    };

    var result = await _nlp.GenerateWithFunctionsAsync(request.Message, functions);

    if (result.FunctionName != null)
    {
        // Execute the function
        var functionResult = await ExecuteFunction(result.FunctionName, result.FunctionArguments);
        return Ok(new { result.Response, functionResult });
    }

    return Ok(new { result.Response });
}

public record BookingRequest(string Message);
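
ExecuteFunction above is left to you. One hypothetical shape is a switch that parses the JSON arguments and dispatches to your own services — _bookingService and its methods are assumed here, not part of the guide's code:

// Hypothetical dispatcher; adapt the cases to your domain services
// (JsonDocument requires using System.Text.Json)
private async Task<object> ExecuteFunction(string name, string? argumentsJson)
{
    using var doc = JsonDocument.Parse(argumentsJson ?? "{}");
    var args = doc.RootElement;

    return name switch
    {
        "check_availability" => await _bookingService.CheckAvailabilityAsync(
            args.GetProperty("check_in").GetString()!,
            args.GetProperty("check_out").GetString()!),
        "create_booking" => await _bookingService.CreateBookingAsync(
            args.GetProperty("guest_name").GetString()!,
            args.GetProperty("check_in").GetString()!,
            args.GetProperty("check_out").GetString()!,
            args.GetProperty("room_type").GetString()!),
        _ => throw new InvalidOperationException($"Unknown function: {name}")
    };
}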

Context Management

using System.Text.Json;

public class ConversationManager
{
    private readonly IDistributedCache _cache;
    private readonly INLPProvider _nlp;

    public ConversationManager(IDistributedCache cache, INLPProvider nlp)
    {
        _cache = cache;
        _nlp = nlp;
    }

    public async Task<string> ProcessMessage(string sessionId, string userMessage)
    {
        // Retrieve conversation history
        var history = await GetConversationHistory(sessionId);

        // Generate response
        var response = await _nlp.GenerateResponseAsync(userMessage, history);

        // Update history
        history.Add(new ChatMessage("user", userMessage));
        history.Add(new ChatMessage("assistant", response));

        // Trim if too long (keep last 20 messages)
        if (history.Count > 20)
        {
            history = history.TakeLast(20).ToList();
        }

        // Save updated history
        await SaveConversationHistory(sessionId, history);

        return response;
    }

    private async Task<List<ChatMessage>> GetConversationHistory(string sessionId)
    {
        var cached = await _cache.GetStringAsync($"conv:{sessionId}");
        return cached != null
            ? JsonSerializer.Deserialize<List<ChatMessage>>(cached) ?? new()
            : new List<ChatMessage>();
    }

    private async Task SaveConversationHistory(string sessionId, List<ChatMessage> history)
    {
        var json = JsonSerializer.Serialize(history);
        await _cache.SetStringAsync(
            $"conv:{sessionId}",
            json,
            new DistributedCacheEntryOptions
            {
                AbsoluteExpirationRelativeToNow = TimeSpan.FromHours(1)
            });
    }
}
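
ConversationManager needs an IDistributedCache implementation behind it. One option is Redis via the Microsoft.Extensions.Caching.StackExchangeRedis package — a sketch assuming a local Redis instance:

// Program.cs — Redis-backed IDistributedCache
builder.Services.AddStackExchangeRedisCache(options =>
{
    options.Configuration = "localhost:6379";
});
builder.Services.AddSingleton<ConversationManager>();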

Error Handling with Retry

// Retry policies below come from the Polly NuGet package (dotnet add package Polly)
using Polly;

public class ResilientNLPProvider : INLPProvider
{
    private readonly INLPProvider _provider;
    private readonly ILogger<ResilientNLPProvider> _logger;

    public ResilientNLPProvider(INLPProvider provider, ILogger<ResilientNLPProvider> logger)
    {
        _provider = provider;
        _logger = logger;
    }

    public async Task<string> GenerateResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        var retryPolicy = Policy
            .Handle<Exception>()
            .WaitAndRetryAsync(3, retryAttempt =>
                TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),
                onRetry: (exception, timeSpan, retryCount, context) =>
                {
                    _logger.LogWarning(
                        "Retry {RetryCount} after {TimeSpan}. Error: {Error}",
                        retryCount, timeSpan, exception.Message);
                });

        return await retryPolicy.ExecuteAsync(async () =>
            await _provider.GenerateResponseAsync(userMessage, history));
    }

    // Similar implementations for other methods...
}
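
To put the decorator to work, wrap the concrete provider at registration time. A minimal manual sketch (libraries like Scrutor can automate this pattern):

// Program.cs — resilient wrapper around the OpenAI provider
builder.Services.AddSingleton<INLPProvider>(sp =>
    new ResilientNLPProvider(
        new OpenAINLPProvider(
            sp.GetRequiredService<IConfiguration>(),
            sp.GetRequiredService<ILogger<OpenAINLPProvider>>()),
        sp.GetRequiredService<ILogger<ResilientNLPProvider>>()));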

Performance Optimization

Caching Common Responses

public class CachedNLPProvider : INLPProvider
{
    private readonly INLPProvider _provider;
    private readonly IMemoryCache _cache;

    public CachedNLPProvider(INLPProvider provider, IMemoryCache cache)
    {
        _provider = provider;
        _cache = cache;
    }

    public async Task<string> GenerateResponseAsync(
        string userMessage,
        List<ChatMessage>? history = null)
    {
        // Only cache simple queries without history
        if (history != null && history.Any())
        {
            return await _provider.GenerateResponseAsync(userMessage, history);
        }

        var cacheKey = $"nlp:{userMessage.ToLowerInvariant()}";

        if (_cache.TryGetValue(cacheKey, out string? cachedResponse))
        {
            return cachedResponse!;
        }

        var response = await _provider.GenerateResponseAsync(userMessage);

        _cache.Set(cacheKey, response, TimeSpan.FromMinutes(5));

        return response;
    }

    // StreamResponseAsync and GenerateWithFunctionsAsync pass straight through to
    // _provider — streamed and tool-call responses are poor caching candidates
}
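
One caveat: raw user messages can be long and contain characters you don't want in a cache key. Hashing the normalized message keeps keys bounded — a small sketch using SHA-256:

// Requires using System.Security.Cryptography and System.Text
var cacheKey = "nlp:" + Convert.ToHexString(
    SHA256.HashData(Encoding.UTF8.GetBytes(userMessage.ToLowerInvariant())));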

Testing

public class NLPProviderTests
{
    // Note: this is an integration test — it calls the live API, so the key
    // must be real (here pulled from an environment variable, not hard-coded)
    [Fact]
    public async Task GenerateResponse_SimpleQuery_ReturnsText()
    {
        // Arrange
        var mockConfig = new Mock<IConfiguration>();
        mockConfig.Setup(x => x["OpenAI:ApiKey"])
            .Returns(Environment.GetEnvironmentVariable("OPENAI_API_KEY"));
        mockConfig.Setup(x => x["OpenAI:Model"]).Returns("gpt-4");

        var provider = new OpenAINLPProvider(
            mockConfig.Object,
            Mock.Of<ILogger<OpenAINLPProvider>>());

        // Act
        var response = await provider.GenerateResponseAsync("Hello");

        // Assert
        Assert.NotEmpty(response);
    }

    [Fact]
    public async Task StreamResponse_SimpleQuery_ReturnsTokens()
    {
        // Arrange — CreateTestProvider() is a small helper (not shown) that
        // builds a provider from real configuration
        var provider = CreateTestProvider();
        var tokens = new List<string>();
        var tokens = new List<string>();

        // Act
        await foreach (var token in provider.StreamResponseAsync("Hello"))
        {
            tokens.Add(token);
        }

        // Assert
        Assert.NotEmpty(tokens);
    }
}
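
For fast unit tests of consumers (controllers, ConversationManager), a hand-rolled fake avoids the network entirely — a sketch, not part of the original suite:

// Deterministic fake for unit-testing anything that depends on INLPProvider
public class FakeNLPProvider : INLPProvider
{
    public Task<string> GenerateResponseAsync(string userMessage, List<ChatMessage>? history = null)
        => Task.FromResult($"echo: {userMessage}");

    public async IAsyncEnumerable<string> StreamResponseAsync(string userMessage, List<ChatMessage>? history = null)
    {
        foreach (var token in new[] { "echo: ", userMessage })
        {
            yield return token;
            await Task.Yield(); // keep the iterator genuinely asynchronous
        }
    }

    public Task<FunctionCallResult> GenerateWithFunctionsAsync(string userMessage, List<Function> functions)
        => Task.FromResult(new FunctionCallResult("no function needed", null, null));
}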

Comparison

| Feature | OpenAI GPT-4 | Claude 3.5 Sonnet |
| --- | --- | --- |
| Typical latency | 500-800 ms | 400-700 ms |
| Context window | 128K tokens | 200K tokens |
| Function calling | Yes | Yes (tools) |
| Pricing | See OpenAI's pricing page | See Anthropic's pricing page |
| Best for | Complex reasoning | Instruction following |

Questions? Drop them in the comments!
