<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Michael Wahl</title>
    <description>The latest articles on DEV Community by Michael Wahl (@0xmichaelwahl).</description>
    <link>https://dev.to/0xmichaelwahl</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F774054%2F3d973daf-cb32-4275-bfb0-da7046653a83.jpg</url>
      <title>DEV Community: Michael Wahl</title>
      <link>https://dev.to/0xmichaelwahl</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/0xmichaelwahl"/>
    <language>en</language>
    <item>
      <title>AWS Bedrock Knowledge Base Testing Script</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Thu, 31 Oct 2024 14:30:58 +0000</pubDate>
      <link>https://dev.to/aws-builders/aws-bedrock-knowledge-base-testing-script-3gnl</link>
      <guid>https://dev.to/aws-builders/aws-bedrock-knowledge-base-testing-script-3gnl</guid>
      <description>&lt;p&gt;This is a simple, but helpful testing script to help you quickly test and validate your AWS bedrock + KB setup. Just update your AWS region if different, and plug in your Bedrock KB ID.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import boto3
import json
import time
from datetime import datetime
def test_kb_setup():
    """Test function to verify Bedrock Knowledge Base setup and queries"""
    # Initialize clients
    bedrock_agent = boto3.client('bedrock-agent-runtime', region_name='us-east-1')
    bedrock_runtime = boto3.client('bedrock-runtime', region_name='us-east-1')
    # Your Knowledge Base ID
    kb_id = "**your-knowledge-base-id**"  # Replace with your actual KB ID
    def test_kb_query(query_text):
        """Test a single knowledge base query"""
        print(f"\nTesting query: '{query_text}'")
        print("-" * 50)
        try:
            # Query the knowledge base
            response = bedrock_agent.retrieve(
                knowledgeBaseId=kb_id,
                retrievalQuery={'text': query_text},
                retrievalConfiguration={
                    'vectorSearchConfiguration': {
                        'numberOfResults': 3
                    }
                }
            )
            # Print raw response for debugging
            print("\nRaw Response:")
            print(json.dumps(response, indent=2, default=str))
            # Process and print retrieved results
            print("\nProcessed Results:")
            if 'retrievalResults' in response:
                for i, result in enumerate(response['retrievalResults'], 1):
                    print(f"\nResult {i}:")
                    print(f"Score: {result.get('score', 'N/A')}")
                    print(f"Content: {result.get('content', {}).get('text', 'N/A')}")
                    print(f"Location: {result.get('location', 'N/A')}")
            else:
                print("No results found in response")
            return True
        except Exception as e:
            print(f"Error during query: {str(e)}")
            return False
    def test_kb_with_bedrock(query_text):
        """Test knowledge base integration with Bedrock"""
        print(f"\nTesting KB + Bedrock integration for: '{query_text}'")
        print("-" * 50)
        try:
            # First get KB results
            kb_response = bedrock_agent.retrieve(
                knowledgeBaseId=kb_id,
                retrievalQuery={'text': query_text},
                retrievalConfiguration={
                    'vectorSearchConfiguration': {
                        'numberOfResults': 3
                    }
                }
            )
            # Format context from KB results
            context = ""
            if 'retrievalResults' in kb_response:
                context = "\n".join([
                    f"Reference {i+1}:\n{result.get('content', {}).get('text', '')}\n"
                    for i, result in enumerate(kb_response['retrievalResults'])
                ])
            # Prepare Bedrock prompt
            enhanced_prompt = (
                f"Using the following references:\n\n{context}\n\n"
                f"Please answer this question: {query_text}\n"
                "Base your response on the provided references and clearly cite them when used."
            )
            # Get Bedrock response
            bedrock_response = bedrock_runtime.invoke_model(
                modelId="anthropic.claude-v2",
                body=json.dumps({
                    "prompt": f"\n\nHuman: {enhanced_prompt}\n\nAssistant:",
                    "max_tokens_to_sample": 500,
                    "temperature": 0.7,
                    "top_p": 1,
                }),
                contentType="application/json",
                accept="application/json",
            )
            response_body = json.loads(bedrock_response.get('body').read())
            final_response = response_body.get('completion', '').strip()
            print("\nBedrock Response:")
            print(final_response)
            return True
        except Exception as e:
            print(f"Error during KB + Bedrock integration: {str(e)}")
            return False
    # Run test queries
    test_queries = [
        "What are our company's remote work policies?",
        "Tell me about employee benefits",
        "What is the vacation policy?",
        "How does the performance review process work?",
        "What are the working hours?"
    ]
    print("Starting Knowledge Base Tests")
    print("=" * 50)
    # Test 1: Basic KB Queries
    print("\nTest 1: Basic Knowledge Base Queries")
    for query in test_queries:
        success = test_kb_query(query)
        if not success:
            print(f"Failed on query: {query}")
    # Test 2: KB + Bedrock Integration
    print("\nTest 2: Knowledge Base + Bedrock Integration")
    for query in test_queries:
        success = test_kb_with_bedrock(query)
        if not success:
            print(f"Failed on integration test: {query}")
if __name__ == "__main__":
    test_kb_setup()
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>ai</category>
      <category>bedrock</category>
      <category>testdev</category>
      <category>python</category>
    </item>
    <item>
      <title>Implementing AI in a startup is transformative but requires the right strategy.</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Thu, 31 Oct 2024 14:22:46 +0000</pubDate>
      <link>https://dev.to/aws-builders/implementing-ai-in-a-startup-is-transformative-but-requires-the-right-strategy-33m9</link>
      <guid>https://dev.to/aws-builders/implementing-ai-in-a-startup-is-transformative-but-requires-the-right-strategy-33m9</guid>
      <description>&lt;p&gt;Here are some key principles every AI-focused startup should keep in mind:&lt;br&gt;
1️⃣ Solve a Real Problem: Start with a problem you deeply understand, set clear success metrics, and define specific goals.&lt;/p&gt;

&lt;p&gt;2️⃣ Scalable Data Approach: Ensure your data strategy scales by partnering for access to training data and maintaining rigorous data security standards.&lt;/p&gt;

&lt;p&gt;3️⃣ Don't Reinvent the Wheel: Leverage existing tools and platforms, focusing on how you uniquely apply them.&lt;/p&gt;

&lt;p&gt;4️⃣ Walk Before You Run: Prioritize quick wins and work alongside humans to enhance value in the early stages.&lt;/p&gt;

&lt;p&gt;5️⃣ Attract the Right Talent: Build a diverse team beyond ML engineers and data scientists to tackle various challenges effectively.&lt;/p&gt;

&lt;p&gt;6️⃣ Establish Ethical Boundaries Early: Set clear ethical principles for AI usage and ensure responsible deployment.&lt;/p&gt;

&lt;p&gt;Start with these principles to lay a strong foundation for collaborative, scalable, and ethical AI innovation! What else would you add to the list?&lt;/p&gt;

&lt;p&gt;Original Post - &lt;a href="https://www.linkedin.com/posts/michael-wahl217_ai-startups-artificialintelligence-activity-7228603608752107522-Sdqk" rel="noopener noreferrer"&gt;https://www.linkedin.com/posts/michael-wahl217_ai-startups-artificialintelligence-activity-7228603608752107522-Sdqk&lt;/a&gt;&lt;/p&gt;

</description>
      <category>ai</category>
      <category>startup</category>
      <category>ethicalai</category>
    </item>
    <item>
      <title>Amazon Bedrock + langchain, Retrieving Info from KBs leverage (RAG)</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Wed, 24 Jan 2024 14:41:28 +0000</pubDate>
      <link>https://dev.to/aws-builders/amazon-bedrock-langchain-retrieving-info-from-kbs-leverage-rag-2f1c</link>
      <guid>https://dev.to/aws-builders/amazon-bedrock-langchain-retrieving-info-from-kbs-leverage-rag-2f1c</guid>
      <description>&lt;p&gt;This is a short sample, but it demonstrates the power and capability of AWS Bedrock services using the foundational base model along with a Bedrock KB. This example can be extended even further using AWS Bedrock agents.&lt;/p&gt;

&lt;p&gt;Below using a simple Python script, we build a client connection to the AWS Bedrock services, the output we retrieve back can be piped or routed to other upstream processes including AWS Bedrock agents or serverless lambda functions.&lt;/p&gt;

&lt;p&gt;Hopefully, this helps get you started experimenting and thinking more about this. Often these short examples become Lego blocks, where you can continue building the bigger or better.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;This is a short sample, but it demonstrates the power and capability of AWS Bedrock services using the foundational base model along with a Bedrock KB. This example can be extended even further using AWS Bedrock agents.

Below using a simple Python script, we build a client connection to the AWS Bedrock services, the output we retrieve back can be piped or routed to other upstream processes including AWS Bedrock agents or serverless lambda functions.

Hopefully, this helps get you started experimenting and thinking more about this. Often these short examples become Lego blocks, where you can continue building the bigger or better.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



</description>
      <category>aws</category>
      <category>llm</category>
      <category>bedrock</category>
      <category>langchain</category>
    </item>
    <item>
      <title>AWS Bedrock Beyond the Base Models, Considering Custom Models.</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Sun, 21 Jan 2024 16:52:05 +0000</pubDate>
      <link>https://dev.to/aws-builders/aws-bedrock-beyond-the-base-models-considering-custom-models-7g6</link>
      <guid>https://dev.to/aws-builders/aws-bedrock-beyond-the-base-models-considering-custom-models-7g6</guid>
      <description>&lt;p&gt;I have been experimenting with AWS Bedrock for a few weeks! With more of a focus on text or content generation, and summarization, and much less on Chat or Q&amp;amp;A and illustration generation.&lt;/p&gt;

&lt;p&gt;There are already many articles, blogs, and videos that cover the basics of the bedrock services, and making API/Python boto3 calls, so I won't cover those areas here. &lt;/p&gt;

&lt;p&gt;I want to share some code examples of using Python boto3 to make calls using the base foundational models in AWS bedrock, but also using custom models that were trained on the base foundation models. &lt;/p&gt;

&lt;p&gt;There are a couple of parameters in the code below that can be modified for your specific use case or just leave them as is and experiment with them and observe the changes in the outputs. You can change the inputText (prompt) to whatever you want. Save this code and run it as Python3 FileName.py&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import boto3
import json

bedrock = boto3.client(service_name= 'bedrock-runtime', region_name= 'us-east-1')
modelId = '**amazon.titan-text-lite-v1**'
accept = 'application/json'
contentType = 'application/json'
body = json.dumps({
  "inputText": "Write a blog about EVs and the environment.",
  "textGenerationConfig": {
        "maxTokenCount": 1024,
        "temperature":0.4,
        "topP":1
        }
})

response = bedrock.invoke_model(body=body, modelId=modelId, accept=accept, contentType=contentType)    

response_body = json.loads(response.get('body').read())
outputText = response_body.get('results')[0].get('outputText')

text = outputText[outputText.index('\n')+1:]
content = text.strip()
print(outputText)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Below we can see a small change which is where we update the ModelId to be the custom-model/provisioned model throughput represented as the ARN.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import boto3
import json

bedrock_runtime = boto3.client(service_name= 'bedrock-runtime', region_name= 'us-east-1')

modelId = '**provisioned model throughputs ARN**'
accept = 'application/json'
contentType = 'application/json'
body = json.dumps({
  "inputText": "Write a blog about EVs and the environment.",
  "textGenerationConfig": {
        "maxTokenCount": 1024,
        "temperature":0.4,
        "topP":1
        }
})

response = bedrock_runtime.invoke_model(body=body, modelId=modelId, accept=accept, contentType=contentType)

response_body = json.loads(response.get('body').read())
outputText = response_body.get('results')[0].get('outputText')

text = outputText[outputText.index('\n')+1:]
content = text.strip()
print(outputText)
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Two more tips that were super helpful to me!&lt;/strong&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;If you are pre-training data based on a base model, it is critical that you save the file in the JSONL file format, and that there are no extra spaces at the bottom of the file. More info here on that- &lt;a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html" rel="noopener noreferrer"&gt;https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html&lt;/a&gt; &lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;This is an example, where we see nothing after or under the last line, otherwise, you will have parse errors when you try and fine-tune the custom model.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;{"prompt": "&amp;lt;prompt text&amp;gt;", "completion": "&amp;lt;expected generated text&amp;gt;"}
{"prompt": "&amp;lt;prompt text&amp;gt;", "completion": "&amp;lt;expected generated text&amp;gt;"}
{"prompt": "&amp;lt;prompt text&amp;gt;", "completion": "&amp;lt;expected generated text&amp;gt;"}      

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ol&gt;
&lt;li&gt;I wanted to create or generate a couple dozen lines to be used with fine-tuning, similar to what's above. For domains/topics I am familiar with, this is an easier task, but for others, it can be hard to get started.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;There is probably another way to tackle this, but I ended up creating a GPT (&lt;a href="https://chat.openai.com/g/g-2qfCPili0-dataset-trainer" rel="noopener noreferrer"&gt;https://chat.openai.com/g/g-2qfCPili0-dataset-trainer&lt;/a&gt;) &lt;strong&gt;(you will need ChatGPT Plus to access it)&lt;/strong&gt;. Maybe something I can do with AWS PartyRock and PartySmith, which worked well for this ideation/creation step. &lt;/p&gt;

&lt;p&gt;With the GPT mentioned above:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Just drop in a PDF or whatever data/file you are working with.
&lt;/li&gt;
&lt;li&gt;Is this for pre-training or fine-tuning?&lt;/li&gt;
&lt;li&gt;Ask the GPT What completion text suits this prompt for fine-tuning. &lt;/li&gt;
&lt;li&gt;Now the fun part, we ask to generate some pre-training text that's in JSONL format, and how many lines you want, in my case I asked for 25-30. &lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;The JSONL format is what's uploaded to the AWS S3 bucket and used for fine-tuning or the continued pre-training, for example. &lt;/p&gt;

</description>
    </item>
    <item>
      <title>Leveraging Amazon Bedrock for Content generation, Ideation, and Iteration!</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Mon, 15 Jan 2024 21:16:48 +0000</pubDate>
      <link>https://dev.to/aws-builders/leveraging-amazon-bedrock-for-content-generation-ideation-and-iteration-36oj</link>
      <guid>https://dev.to/aws-builders/leveraging-amazon-bedrock-for-content-generation-ideation-and-iteration-36oj</guid>
      <description>&lt;p&gt;Amazon bedrock can be accessed using the AWS console, and each of the foundational models can be used in a playground for testing. &lt;/p&gt;

&lt;p&gt;For this post, I will be sharing how I used the Python SDK - Boto3 to interact with Bedrock, and to generate text. &lt;/p&gt;

&lt;p&gt;As a note, you will need to request access to the foundational models. To do this, go to Bedrock in the AWS console, click on "Model Access" and click on the edit button. Then, select all the models you want to use and save. It will take a while for them to be available.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F15w2ovz3qah0d3jevbs9.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F15w2ovz3qah0d3jevbs9.png" alt="Image description" width="800" height="417"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Once you have requested access to the model, you will need to configure a local aws-cli configuration to work with bedrock-runtime API. To do this, you can use the following command:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;aws configure

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;I would also recommend creating separate specific IAM credentials to use with the Bedrocks API. &lt;/p&gt;

&lt;p&gt;Below is a simple Python script to be used when accessing the Bedrock API, and using the Cohere Command foundational model.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import boto3
import json

bedrock = boto3.client(
    service_name='bedrock-runtime', 
    region_name="us-east-1"
    )   
prompt = """
Write a blog post that I can share on Medium, or Linked In, on how to maximize the range, and battery life of an EV, as a new EV owner.       
"""
body = json.dumps({
        "prompt": prompt,
        "max_tokens": 2000, #Maximum number of tokens to generate. Responses are not guaranteed to fill up to the maximum desired length.
        "temperature": 0.75, #The higher the temperature, the crazier the text. Tunes the degree of randomness in a generation. Lower temperatures mean fewer random generations.
        "p": 0.01, #If set to float less than 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
        "k": 0, #If set to int &amp;gt; 1, only the top k tokens sorted in descending order according to their probability are kept for generation.
        #stop_sequences: ["\n\n"] #Sequence of tokens where generation is stopped.
})

# Define the type of model that will be used for the inference
modelId = 'cohere.command-text-v14'
accept = 'application/json'
contentType = 'application/json'

# Call the Bedrock API
response = bedrock.invoke_model(body=body, modelId=modelId, accept=accept, contentType=contentType)

response_body = json.loads(response.get('body').read())

print(response_body['generations'][0]['text'])

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;This is another slightly different Python script, where we specify the AWS credentials to be used, along with using the Anthropic Claude model.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;#Anthropic Claude v2.1
# Make sure that you have the latest version of boto3
#pip install --upgrade boto3
#print(boto3.__version__) --&amp;gt; should be at least 1.28
import boto3
import json

# function to call the bedrock API
def call_bedrock(prompt, assistant):
    access_key = 'ABC-YourKeyHere-123'
    access_secret = 'ABC-YourSecretHere'
    bedrock = boto3.client(service_name='bedrock-runtime', 
                           region_name='us-east-1', 
                           aws_access_key_id=access_key, 
                           aws_secret_access_key=access_secret)   
    body = json.dumps({
        "prompt": f"\n\nHuman:{prompt}\n\nAssistant:{assistant}",   
        "max_tokens_to_sample": 1000, #Maximum number of tokens to generate. Responses are not guaranteed to fill up to the maximum desired length.
        "temperature": 0.4, #The higher the temperature, the crazier the text. Tunes the degree of randomness in a generation. Lower temperatures mean fewer random generations.
        "top_p": 1, #If set to float less than 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
    #    "top_k": 250, #If set to int &amp;gt; 1, only the top k tokens sorted in descending order according to their probability are kept for generation.
        #stop_sequences: ["\n\n"] #Sequence of tokens where generation is stopped.
    })

# Define the type of model that will be used for the inference
    modelId = 'anthropic.claude-v2'
    accept = 'application/json'
#   accept = '*/*'
    contentType = 'application/json'

# Call the Bedrock API
    response = bedrock.invoke_model(body=body, modelId=modelId, accept=accept, contentType=contentType)
    response_body = json.loads(response.get('body').read())
    print(response_body.get('completion'))

# Call_bedrock function 
call_bedrock("Write a blog post that I can share on Medium, or Linked In, on how to maximize the range, and battery life of an EV, as a new EV owner.")

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;With either Python script, the code is accessing the AWS Bedrock-runtime API to generate text/content as an output. &lt;/p&gt;

&lt;p&gt;Walking through the code quickly, it imports the necessary libraries, including boto3 and JSON. The boto3 library is then used to interact with the AWS Services, and the JSON library helps us to serialize and deserialize the JSON data. &lt;/p&gt;

&lt;p&gt;After we save the Python file we can run it using a terminal, or within Visual Studio Code which is what I often do.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;python bedrock_demo1.py

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;There are many parameters to test with, temperature is one to play around with and experiment to see how the higher value above zero impacts the content being generated with either base foundational model.  &lt;/p&gt;

</description>
      <category>ai</category>
      <category>aws</category>
      <category>python</category>
      <category>llm</category>
    </item>
    <item>
      <title>Logging AWS SES Details</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Tue, 26 Dec 2023 20:14:31 +0000</pubDate>
      <link>https://dev.to/aws-builders/logging-aws-ses-details-2gim</link>
      <guid>https://dev.to/aws-builders/logging-aws-ses-details-2gim</guid>
      <description>&lt;p&gt;&lt;em&gt;Cover Image Generated with DALLE :)&lt;/em&gt;&lt;br&gt;
AWS SES allows you to view various metrics such as the # of sends, rejects, bounces, etc. However, there is no place to view or log additional information such as the sender, recipient, subject, message or if the email is rejected, what is the reason or the diagnostic Code for example. &lt;/p&gt;

&lt;p&gt;Recently I needed to capture detailed notifications about email bounces and deliveries and log that information in AWS CloudWatch under the CloudWatch log group. &lt;/p&gt;

&lt;p&gt;Prerequisites that you may need to complete before deploying the additional SES logging.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Under Amazon SES&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Make sure that your domain, or email address identity shows in green as verified.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Under Amazon SNS&lt;/strong&gt; &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Create a new SNS topic to capture detailed notifications for bounces, deliveries, etc. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Now head back to Amazon SES, under the verified identity (domain or email), select the Notifications tab, click on the Feedback Notifications section -&amp;gt; Click the edit button to navigate to the next page. Select the bounce notification topic with the SNS topic ARN that we created in the prior step and click Save Changes.&lt;/p&gt;

&lt;p&gt;Once the prerequisite steps are complete, we are ready to move on to the next step!&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;Download ses_bounce_logging_blog.yml CloudFormation file from the below link, once you save this on your local machine, follow the next steps.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;a href="https://dev.tourl"&gt;(https://github.com/aws-samples/digital-user-engagement-reference-architectures/blob/master/cloudformation/ses_bounce_logging_blog.yml)&lt;/a&gt;&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Go to the AWS CloudFormation Console and Click Create Stack.&lt;br&gt;
Select the Upload template button and Click Choose to upload the ses_bounce_logging_blog.yml file that we downloaded earlier.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Click Next, Specify a Stack Name, update the default value of Cloud-Watch-Group-Name, and Select the Event Type “Bounce”, “Complaint”, or “Delivery”. For my use case, I selected bounce as that's what I suspected was happening to emails being sent out, but not received by the intended recipient.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Enter the (ARN) of the AWS SNS topic that we created earlier and click Next, and next. &lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Select I acknowledge..., and click Create Stack. &lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;strong&gt;Time to Test&lt;/strong&gt;&lt;br&gt;
Go to Amazon SES and select the Verified Domain Identity Checkbox. Click Send a Test Email Button. Fill in the required information such as the From-address, Subject, and Scenario is Bounce, and then click Send Test Email.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;AWS Cloudwatch&lt;/strong&gt;&lt;br&gt;
Head over to AWS Cloudwatch | log groups, once the bounce notification is received in the SNS topic it will be sent over to the Lambda function, and then will be logged in the /aws/ses/bounce_logs CloudWatch log group. As a small set of notes, the default log retention for these SES logs is set to forever, but you can tweak the retention period to whatever makes sense for your specific project or policies. You may also export the logs to Amazon S3 for longer-term storage.&lt;/p&gt;

&lt;p&gt;To visualize this data, you can filter down in AWS CloudWatch, which also allows us to graph metrics and make them searchable. You can export the logs to Amazon S3 for the long term.&lt;/p&gt;

</description>
      <category>aws</category>
      <category>learning</category>
      <category>tutorial</category>
      <category>devops</category>
    </item>
    <item>
      <title>Excited to share my latest creations on AWS PartyRock!</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Sat, 23 Dec 2023 00:35:54 +0000</pubDate>
      <link>https://dev.to/aws-builders/excited-to-share-my-latest-creation-on-aws-partyrock-2jk</link>
      <guid>https://dev.to/aws-builders/excited-to-share-my-latest-creation-on-aws-partyrock-2jk</guid>
      <description>&lt;p&gt;🚀 Excited to share my latest creation on AWS PartyRock! &lt;/p&gt;

&lt;p&gt;This was my thought process from a GPT perspective for the first example app shared below. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Name:&lt;/strong&gt; Post Boost&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Description:&lt;/strong&gt; A friendly GPT that helps generate content on social media that's engaging with your followers.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Instructions/Prompt:&lt;/strong&gt; Post Boost, your role encompasses not only assisting in generating and optimizing various forms of social media content, including posts, stories, newsletters, polls, and case studies but also acting as a post coach. This involves guiding users on best practices for creating engaging content, particularly in the technology sector. You'll remember previous interactions for personalized experiences and ask users to specify their content type for tailored assistance. Your approach includes using hashtags, mentions, emojis, and URLs strategically. With a friendly, casual, and enthusiastic tone, you provide clarifications, feedback, and coaching to enhance users' social media skills.&lt;/p&gt;

&lt;p&gt;I headed over to PartyRock, created a new app, and used the original Instructions/Prompt from above. Below is the public link to my app. Excited to hear what you think, useful-helpful?&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://partyrock.aws/u/mwahl217/80ZtmlcQk/Social-Media-Content-Assistant-Coach" rel="noopener noreferrer"&gt;https://partyrock.aws/u/mwahl217/80ZtmlcQk/Social-Media-Content-Assistant-Coach&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://partyrock.aws/u/mwahl217/xrf5vSlrr/EV-Charging-Assistant" rel="noopener noreferrer"&gt;https://partyrock.aws/u/mwahl217/xrf5vSlrr/EV-Charging-Assistant&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://partyrock.aws/u/mwahl217/9D39lrj9H/Grill-Guru%3A-Traeger-Smoker-Troubleshooting-and-Maintenance-Assistant" rel="noopener noreferrer"&gt;https://partyrock.aws/u/mwahl217/9D39lrj9H/Grill-Guru%3A-Traeger-Smoker-Troubleshooting-and-Maintenance-Assistant&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;a href="https://partyrock.aws/u/mwahl217/GOi2agY9R/Virtual-Fitness-Coach" rel="noopener noreferrer"&gt;https://partyrock.aws/u/mwahl217/GOi2agY9R/Virtual-Fitness-Coach&lt;/a&gt;&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

</description>
      <category>aws</category>
      <category>partyrock</category>
      <category>ai</category>
      <category>learning</category>
    </item>
    <item>
      <title>Building a Simple Weather App With AWS Lambda and API Gateway</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Thu, 13 Jul 2023 21:42:31 +0000</pubDate>
      <link>https://dev.to/aws-builders/building-a-simple-weather-app-with-aws-lambda-and-api-gateway-8bi</link>
      <guid>https://dev.to/aws-builders/building-a-simple-weather-app-with-aws-lambda-and-api-gateway-8bi</guid>
      <description>&lt;p&gt;Introduction&lt;br&gt;
This is my second open-source project built using ChatGPT as my code review assistant and for a lot of the debugging, and troubleshooting.&lt;br&gt;
Behind this simple app, the heavy lifting is done using AWS Lambda, API Gateway, and OpenWeatherMap.&lt;/p&gt;

&lt;p&gt;Problem Statement&lt;br&gt;
I wanted to build a really simple serverless weather app to check if it’s raining, snowing, cloudy, etc. I wanted to call a protected public API, run a lambda function, and get a simple short result with the weather conditions within my local zip code.&lt;/p&gt;

&lt;p&gt;Solution Overview&lt;br&gt;
As you may be aware AWS Lambda is a serverless computing service that allows you to run code without provisioning or managing servers, storage, etc. The AWS API Gateway is a fully managed service for creating, deploying, and managing APIs. By leveraging AWS Lambda and API Gateway, you can benefit from the serverless architecture, which eliminates the need to manage infrastructure and enables auto-scaling and high availability. This approach allows me to focus on the app’s logic, and its features, without worrying about server setup, maintenance, or scalability.&lt;/p&gt;

&lt;p&gt;I will leverage the OpenWeatherMap API to retrieve weather data for a given location. To achieve this, I will use Node.js and Express.js to develop the app’s logic. As I mentioned above, the core of the app will be done as an AWS Lambda function, which will be triggered when an API request is made. The AWS API Gateway acts as the front end for my simple app, providing an API endpoint that users can access.&lt;/p&gt;

&lt;p&gt;Setting Up the Development Environment&lt;br&gt;
Choose a code editor that you’re comfortable with. Some popular options include Visual Studio Code, Atom, or Sublime Text. Install the code editor of your choice and open the project directory in it. I tend to use either Sublime or Visual Studio (VS) Code, VS Code is nice as you can easily commit, and push code to GitHub once you are ready.&lt;/p&gt;

&lt;p&gt;Deploying to AWS Lambda and API Gateway&lt;br&gt;
Since there are no servers, there is nothing to install or update, we just need to set up and configure a few things with AWS Lambda, and AWS API Gateway. I won't cover creating a deployment package of the node.js app, and all other necessary files, you can simply clone the repository I have out on GitHub &lt;a href="https://github.com/mwahl217/weather-app-aws" rel="noopener noreferrer"&gt;https://github.com/mwahl217/weather-app-aws&lt;/a&gt;. You can simply upload the Zip file to Lambda when you edit the code and enter your API key and specific location information for OpenWeatherMap.&lt;/p&gt;

&lt;p&gt;Create an AWS Lambda Function:&lt;br&gt;
— Go to the AWS Management Console and navigate to the AWS Lambda service.&lt;br&gt;
— Click on “Create function” and choose “Author from scratch”.&lt;br&gt;
— Provide a name for your function, select “Node.js” as the runtime, don't worry about the version, and choose an appropriate execution role. As a note, I did use node.js 18.x, and set the handler as index.handler.&lt;br&gt;
— In the “Function code” section, select “Upload a .zip file” and upload the ZIP file you downloaded from above.&lt;br&gt;
— Set the handler to the appropriate value, such as app.handler or index.handler.&lt;br&gt;
— Configure the necessary environment variables, such as the OpenWeatherMap API key, and any other required configuration values such as your Zip code and Country for example.&lt;br&gt;
— Save the Lambda function.&lt;/p&gt;

&lt;p&gt;Set Up an API Gateway:&lt;br&gt;
— Go to the AWS Management Console and navigate to the API Gateway service.&lt;br&gt;
— Click on “Create API” and choose “REST API”.&lt;br&gt;
— Configure your API by specifying a name and selecting the appropriate security settings.&lt;br&gt;
— Create a new resource, e.g., “/weather” or whatever makes sense for you.&lt;br&gt;
— Add a method to the resource, such as “GET”.&lt;br&gt;
— Configure the integration type as “Lambda Function” and select the Lambda function you created above. If you can remember, just head back to your lambda function, and under the function overview, you will find a line for function ARN which you can copy and paste into the Lambda function field.&lt;br&gt;
— Save the API configuration.&lt;/p&gt;

&lt;p&gt;Deploying the API:&lt;br&gt;
— In the API Gateway console, select the API you created.&lt;br&gt;
— Go to the “Actions” dropdown and choose “Deploy API”.&lt;br&gt;
— Choose a stage name (e.g., “prod”) and deploy the API.&lt;br&gt;
— Note the generated API endpoint URL which will be up toward the top of the screen.&lt;/p&gt;

&lt;p&gt;Test the Deployed App:&lt;br&gt;
— Use the generated API endpoint URL to test your deployed app by making an HTTP GET request to the appropriate resource path, such as &lt;code&gt;https://your-api-endpoint/prod&lt;/code&gt;. From a terminal, you can also just use a simple cURL &lt;a href="https://your-api-endpoint/prod" rel="noopener noreferrer"&gt;https://your-api-endpoint/prod&lt;/a&gt;&lt;br&gt;
— Verify that the response indicates whether it’s raining, cloudy, snowing, or not.&lt;/p&gt;

&lt;p&gt;Final Thoughts and Wrap up&lt;/p&gt;

&lt;p&gt;Remember to review and configure the necessary security settings, such as authentication and authorization, based on your specific requirements.&lt;/p&gt;

&lt;p&gt;I did decide to use a simple WAF Web ACL to restrict what or rather who could access from API, and from a specific set of IP Sets I specified. The AWS API gateway also allows for custom domains to help simplify the API URL instead of the default which is random and follows execute-api.us-east-1.amazonaws.com, your specific URL may look a little different depending on the AWS region you are using for your deployments.&lt;/p&gt;

</description>
      <category>aws</category>
      <category>lambda</category>
      <category>api</category>
      <category>javascript</category>
    </item>
    <item>
      <title>AWS SageMaker and Canvas</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Fri, 07 Jul 2023 18:29:05 +0000</pubDate>
      <link>https://dev.to/aws-builders/aws-sagemaker-and-canvas-2bce</link>
      <guid>https://dev.to/aws-builders/aws-sagemaker-and-canvas-2bce</guid>
      <description>&lt;p&gt;In this post, I will go through a step-by-step process on how to build some cost/price predictions based on historical data, and without writing a single line of code!&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Problem/Challenges&lt;/strong&gt;&lt;br&gt;
With some relatively small CSV files containing historical data, we can make some powerful predictions or forecasts based on that historical data. Of course, the more data that you have, the better, but start with what you have. I am going to follow a use case for Manufacturing. There are many others you can follow in the AWS labs, or simply upload your own datasets once you have AWS SageMaker up and running, and then choose your own adventure.     &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Solution(s)&lt;/strong&gt;&lt;br&gt;
We can use a few different AWS services, including AWS S3, SageMaker, and Canvas. Basically, we will upload the historical data to a private AWS S3 bucket, then using AWS SageMaker we will build a model, analyze it, and finally predict target values. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Preparation&lt;/strong&gt;&lt;br&gt;
For simplicity, I am going to use a dataset from an AWS lab. &lt;a href="https://catalog.workshops.aws/canvas-immersion-day/en-US/1-use-cases" rel="noopener noreferrer"&gt;Canvas for Transportation &amp;amp; Logistics: Supply Chain delivery on-time.&lt;/a&gt; &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Steps&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Part 1&lt;/strong&gt;&lt;br&gt;
The first thing we need to do after signing into the AWS Console, is to choose the region that works for you, not all may be available for SageMaker, I will be using US-East-1. &lt;a href="https://us-east-1.console.aws.amazon.com/sagemaker/home?region=us-east-1#/landing" rel="noopener noreferrer"&gt;SageMaker Home&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Next we need to create a domain, click Get Started button.&lt;br&gt;
&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fo688p1ebr0ytqjftdegu.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fo688p1ebr0ytqjftdegu.png" alt="Image description" width="800" height="313"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Select the create domain, a new window opens called setup SageMaker Domain. &lt;/p&gt;

&lt;p&gt;Enter a Name under Domain name, I called mine forecast, but you can name it whatever makes sense to you. Next you will need to create a new IAM role to allow access to your AWS account. Click Create a new role, then select Any S3 bucket and click Create New Role. Once it’s done, click Submit at the bottom of the page. &lt;/p&gt;

&lt;p&gt;If you encounter any errors, they will be displayed at the top, but following these steps you should be fine. There is also an option to specify or limit the IAM policy to an S3 bucket, I decided not to limit it, and named my s3 bucket "sagemaker-forecasting". &lt;/p&gt;

&lt;p&gt;At this point, it took some time to create the domain, so now is a great time to grab a coffee, cold beverage, or make sure you have your data ready to upload to S3.&lt;/p&gt;

&lt;p&gt;As I mentioned, we will be using a synthetic dataset called Shipping Logs Dataset. This dataset contains complete shipping data for all products delivered including estimated time, shipping priority, carrier and origin. The dataset can be downloaded &lt;a href="https://static.us-east-1.prod.workshops.aws/public/80d30504-44b2-465c-8e1f-cf1018d6e694/static/datasets/ShippingLogs.csv" rel="noopener noreferrer"&gt;here&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Next we need to upload the dataset to an Amazon S3 bucket. if you are not familiar, an S3 bucket is simply a place to store and retrieve data in the cloud. You can create a new bucket or use any existing buckets, I will be using an existing s3 bucket I created earlier, when we setup the domain. &lt;/p&gt;

&lt;p&gt;If you need to create a new S3 bucket, simply go to the AWS Console and search for S3 or click &lt;a href="https://s3.console.aws.amazon.com/s3/home?region=us-east-1" rel="noopener noreferrer"&gt;here&lt;/a&gt;. &lt;/p&gt;

&lt;p&gt;Click the Create bucket button. Give a bucket a unique name and keep all other parameters as default. Once the s3 bucket is created and ready, click upload and select the dataset or the CSV file from above, in my example it's the ShippingLogs.csv file. &lt;/p&gt;

&lt;p&gt;At this point we can head back to the SageMaker console and check if the domain is ready, it should now have a status of InService, click on the domain name when its ready and InService. &lt;/p&gt;

&lt;p&gt;Select the launch to the right of the user profile, and choose &lt;br&gt;
Canvas. The first time you login to SageMaker Canvas it will prompt you, you can skip this for now. &lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhhpuyyynwcmew9xrmwb0.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhhpuyyynwcmew9xrmwb0.png" alt="Image description" width="800" height="250"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;On the left side you will see a menu, click Datasets and then click the Import. Select Amazon S3 as the Data Source, select your CSV file and click the Import data button at the bottom. You will see an option to preview the first 100 rows and import the data. If you see canvas-sample-shipping-logs.csv, you can skip the import step, and instead click on the join datasets button. Now drag and drop from the left side of the screen to the right two datasets: canvas-sample-product-descriptions and canvas-sample-shipping-logs or the name of the dataset if you uploaded to AWS S3. Make sure that ProductID is used as join key, by clicking on the join icon. Now click on Save &amp;amp; close to save this dataset, and give it a name such as Logistics or SupplyChain. &lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb4hq7fwy3q410nzysokd.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fb4hq7fwy3q410nzysokd.png" alt="Image description" width="800" height="370"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;We are ready to build and train our new model. &lt;/p&gt;

&lt;p&gt;Select the model with the checkbox on the left, then click on Create Model on the top right, name it, and click on Create. &lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fond4jbai5i7rjb30mmzw.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fond4jbai5i7rjb30mmzw.png" alt="Image description" width="798" height="346"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The first step in training our ML model is to choose our target variable. For this example, the target is the ExpectedShippingDays variable. This is what we want to train the model to predict. Canvas will automatically detect that this is a Numeric Prediction problem (also known as regression). Taking a quick pause here, this is where you may have something slightly different to predict as you experiment and evaluate different datasets, and use cases. I wont cover this in this post, but we can also run similar for "classification" use cases, by predicting the "OnTimeDelivery" instead of "ExpectedShippingDays". &lt;/p&gt;

&lt;p&gt;To train a preview model, click on the Preview model button on the top right of the screen. This can take some time, anywhere from 2-10 minutes is what I saw. When training a model, Canvas provides two options: Quick Build and Standard Build.&lt;/p&gt;

&lt;p&gt;Once done, Canvas will automatically move to the Analyze tab, to show us the results of our quick training. &lt;/p&gt;

&lt;p&gt;At this point we can move on to predict, this will give us the predicted values of ExpectedShippingDays for our example.  &lt;/p&gt;

</description>
      <category>aws</category>
      <category>data</category>
      <category>nocode</category>
      <category>machinelearning</category>
    </item>
    <item>
      <title>Training ChatGPT with local data to create your own chat bot!</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Sun, 02 Jul 2023 15:58:04 +0000</pubDate>
      <link>https://dev.to/0xmichaelwahl/training-chatgpt-with-local-data-to-create-your-own-chat-bot-4l53</link>
      <guid>https://dev.to/0xmichaelwahl/training-chatgpt-with-local-data-to-create-your-own-chat-bot-4l53</guid>
      <description>&lt;p&gt;Using OpenAI’s ChatGPT, we can train a language model using our own local/custom data, thats scoped toward our own needs or use cases. &lt;/p&gt;

&lt;p&gt;I am using a Mac/MacOS, but you can also use Windows or Linux. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Install Python&lt;/strong&gt;&lt;br&gt;
You need to make sure you have Python installed, and at least version 3.0+. Head over to the following link and download the Python installer: &lt;a href="https://www.python.org/downloads" rel="noopener noreferrer"&gt;https://www.python.org/downloads&lt;/a&gt;. You can also open a terminal and run &lt;code&gt;python3 --version&lt;/code&gt; to verify you have the correct version of python installed. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Upgrade PIP&lt;/strong&gt;&lt;br&gt;
&lt;code&gt;python3 -m pip install -U pip&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Installing Libraries&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;pip3 install openai
pip install gpt_index==0.4.24
pip3 install PyPDF2
pip3 install gradio
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Get OpenAI key&lt;/strong&gt;&lt;br&gt;
&lt;a href="https://platform.openai.com/account/api-keys" rel="noopener noreferrer"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prep Data&lt;/strong&gt;&lt;br&gt;
Create a new directory named ‘docs’ anywhere you like and put PDF, TXT or CSV files inside it. You can add multiple files if you like, but remember that the more data you add, the more tokens will be used. Free accounts are given $18 worth of tokens to use.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Script&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import gradio as gr
import sys
import os

os.environ["OPENAI_API_KEY"] = 'ApiGoesHere'

def construct_index(directory_path):
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.7, model_name="text-davinci-003", max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()

    index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    index.save_to_disk('index.json')

    return index

def chatbot(input_text):
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(input_text, response_mode="compact")
    return response.response

iface = gr.Interface(fn=chatbot,
                     inputs=gr.inputs.Textbox(lines=7, label="Enter your text"),
                     outputs="text",
                     title="My AI Chatbot")

index = construct_index("docs")
iface.launch(share=True)

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Save as app.py &lt;/p&gt;

&lt;p&gt;Open Terminal and run following command&lt;/p&gt;

&lt;p&gt;&lt;code&gt;python3 app.py&lt;/code&gt;&lt;/p&gt;

&lt;p&gt;This will start training. This might take some time based on how much data you have fed to it. Once done, it will output a link where you can test the responses using simple UI. It outputs local URL: &lt;a href="http://127.0.0.1:7860" rel="noopener noreferrer"&gt;http://127.0.0.1:7860&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;You can open this in any browser and start testing your custom trained chatbot. The port number above might be different for you.&lt;/p&gt;

&lt;p&gt;To train on more or different data, you can close using CTRL + C and change files and then run the python file again.&lt;/p&gt;

&lt;p&gt;If this article was helpful, maybe consider a clap or follow me back. &lt;/p&gt;

</description>
      <category>chatgpt</category>
      <category>ai</category>
      <category>python</category>
    </item>
    <item>
      <title>Use AI to create/generate charts/diagrams using Mermaid</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Wed, 28 Jun 2023 01:33:37 +0000</pubDate>
      <link>https://dev.to/0xmichaelwahl/use-ai-to-creategenerate-chartsdiagrams-using-mermaid-5g55</link>
      <guid>https://dev.to/0xmichaelwahl/use-ai-to-creategenerate-chartsdiagrams-using-mermaid-5g55</guid>
      <description>&lt;p&gt;I have spent several months experimenting with AI and AI-assistants, including OpenAI ChatGPT and LLM for example. &lt;/p&gt;

&lt;p&gt;There are so many different ways AI can help improve productivity, efficiency, accuracy, and accelerate processes by acting as a pilot or AI power Assistant. The outcomes, or work product/outputs are generally better content and deliverables. &lt;/p&gt;

&lt;p&gt;There are many different use cases for charts/diagrams, but within business operations, engineering, and software development, most of them are within relationships, sequence, components, data flows, and activity just to name a few of them. &lt;/p&gt;

&lt;p&gt;I used ChatGPT to help me generate the code, which I will then use within the code section of the &lt;a href="https://mermaid.live" rel="noopener noreferrer"&gt;Mermaid Live Editor&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Switching over to ChatGPT for a moment, suppose I want to create/generate simple project/gantt chart. The prompt may look something like this. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Prompt&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Please suggest typical tasks to be done and generate the Gantt chart with 2 months of timeline using mermaid.js
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;strong&gt;Output&lt;/strong&gt;&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Sure! Here are some typical tasks that can be included in a project:

1. Project Planning
2. Requirement Gathering
3. Design and Wireframing
4. Development
5. Testing and Quality Assurance
6. Deployment
7. Documentation
8. User Acceptance Testing
9. Bug Fixes and Enhancements
10. Project Closure

Now, let's generate a Gantt chart with a 2-month timeline using Mermaid.js:

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;gantt&lt;br&gt;
  dateFormat  YYYY-MM-DD&lt;br&gt;
  title Project Timeline&lt;/p&gt;

&lt;p&gt;section Project Planning&lt;br&gt;
  Project Planning         :done,    des1, 2023-07-01, 7d&lt;/p&gt;

&lt;p&gt;section Development&lt;br&gt;
  Requirement Gathering    :active,  des2, 2023-07-08, 5d&lt;br&gt;
  Design and Wireframing   :         des3, 2023-07-13, 7d&lt;br&gt;
  Development              :         des4, 2023-07-20, 14d&lt;br&gt;
  Testing and QA           :         des5, 2023-08-03, 10d&lt;/p&gt;

&lt;p&gt;section Deployment&lt;br&gt;
  Deployment               :         des6, 2023-08-13, 3d&lt;br&gt;
  Documentation            :         des7, 2023-08-16, 5d&lt;/p&gt;

&lt;p&gt;section User Acceptance Testing&lt;br&gt;
  UAT                      :         des8, 2023-08-21, 7d&lt;/p&gt;

&lt;p&gt;section Bug Fixes and Enhancements&lt;br&gt;
  Bug Fixes and Enhancements :      des9, 2023-08-28, 7d&lt;/p&gt;

&lt;p&gt;section Project Closure&lt;br&gt;
  Project Closure          :         des10, 2023-09-04, 3d&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;
This Gantt chart provides an overview of the project tasks and their durations. It includes sections for Project Planning, Development, Deployment, User Acceptance Testing, Bug Fixes and Enhancements, and Project Closure. You can modify the task names, durations, and start dates according to your specific project requirements.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Now let's head over to &lt;a href="https://mermaid.live" rel="noopener noreferrer"&gt;Mermaid Live Editor&lt;/a&gt; and copy a portion of the output from ChatGPT to Mermaid Live under code, this will generate our project chart.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;gantt
  dateFormat  YYYY-MM-DD
  title Project Timeline

  section Project Planning
  Project Planning         :done,    des1, 2023-07-01, 7d

  section Development
  Requirement Gathering    :active,  des2, 2023-07-08, 5d
  Design and Wireframing   :         des3, 2023-07-13, 7d
  Development              :         des4, 2023-07-20, 14d
  Testing and QA           :         des5, 2023-08-03, 10d

  section Deployment
  Deployment               :         des6, 2023-08-13, 3d
  Documentation            :         des7, 2023-08-16, 5d

  section User Acceptance Testing
  UAT                      :         des8, 2023-08-21, 7d

  section Bug Fixes and Enhancements
  Bug Fixes and Enhancements :      des9, 2023-08-28, 7d

  section Project Closure
  Project Closure          :         des10, 2023-09-04, 3d

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzi4namfl8wfzm86eku3v.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fzi4namfl8wfzm86eku3v.png" alt="Image description" width="800" height="405"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Create a serverless API that interacts with an AWS DynamoDB table</title>
      <dc:creator>Michael Wahl</dc:creator>
      <pubDate>Mon, 26 Jun 2023 23:13:10 +0000</pubDate>
      <link>https://dev.to/aws-builders/create-a-serverless-api-that-interacts-with-an-aws-dynamodb-table-1gp6</link>
      <guid>https://dev.to/aws-builders/create-a-serverless-api-that-interacts-with-an-aws-dynamodb-table-1gp6</guid>
      <description>&lt;p&gt;When we call the API, our request is routed to a Lambda function, which performs the desired action between Lambda and DynamoDB. &lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwxp5qwcvnptsh3d75s8p.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwxp5qwcvnptsh3d75s8p.png" alt="Image description" width="718" height="111"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Create a DynamoDB table&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Open the DynamoDB console at &lt;a href="https://console.aws.amazon.com/dynamodb/" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/dynamodb/&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;Choose Create table.&lt;/li&gt;
&lt;li&gt;For Table name, enter http-crud-tutorial-items.&lt;/li&gt;
&lt;li&gt;For Partition key, enter id.&lt;/li&gt;
&lt;li&gt;Choose Create table.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Create a Lambda function&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Sign in to the Lambda console at &lt;a href="https://console.aws.amazon.com/lambda" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/lambda&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Choose Create function.&lt;/li&gt;
&lt;li&gt;For Function name, enter http-demo-function.&lt;/li&gt;
&lt;li&gt;Under Permissions choose Change default execution role.&lt;/li&gt;
&lt;li&gt;Select Create a new role from AWS policy templates.&lt;/li&gt;
&lt;li&gt;For Role name, enter http-demo-role.&lt;/li&gt;
&lt;li&gt;&lt;p&gt;For Policy templates, choose Simple microservice permissions.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Create function&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Open the index.mjs under the Code tab, and replace all the code with this sample code.&lt;br&gt;
&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;import { DynamoDBClient } from "@aws-sdk/client-dynamodb";
import {
  DynamoDBDocumentClient,
  ScanCommand,
  PutCommand,
  GetCommand,
  DeleteCommand,
} from "@aws-sdk/lib-dynamodb";

const client = new DynamoDBClient({});

const dynamo = DynamoDBDocumentClient.from(client);

const tableName = "items";

export const handler = async (event, context) =&amp;gt; {
  let body;
  let statusCode = 200;
  const headers = {
    "Content-Type": "application/json",
  };

  try {
    switch (event.routeKey) {
      case "DELETE /items/{id}":
        await dynamo.send(
          new DeleteCommand({
            TableName: tableName,
            Key: {
              id: event.pathParameters.id,
            },
          })
        );
        body = `Deleted item ${event.pathParameters.id}`;
        break;
      case "GET /items/{id}":
        body = await dynamo.send(
          new GetCommand({
            TableName: tableName,
            Key: {
              id: event.pathParameters.id,
            },
          })
        );
        body = body.Item;
        break;
      case "GET /items":
        body = await dynamo.send(
          new ScanCommand({ TableName: tableName })
        );
        body = body.Items;
        break;
      case "PUT /items":
        let requestJSON = JSON.parse(event.body);
        await dynamo.send(
          new PutCommand({
            TableName: tableName,
            Item: {
              id: requestJSON.id,
              price: requestJSON.price,
              name: requestJSON.name,
            },
          })
        );
        body = `Put item ${requestJSON.id}`;
        break;
      default:
        throw new Error(`Unsupported route: "${event.routeKey}"`);
    }
  } catch (err) {
    statusCode = 400;
    body = err.message;
  } finally {
    body = JSON.stringify(body);
  }

  return {
    statusCode,
    body,
    headers,
  };
};

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;ul&gt;
&lt;li&gt;Choose Deploy to update the function. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Create an HTTP API&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Sign in to the API Gateway console at &lt;a href="https://console.aws.amazon.com/apigateway" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/apigateway&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;Choose Create API, and then for HTTP API, choose Build.&lt;/li&gt;
&lt;li&gt;For API name, enter http-demo-api.&lt;/li&gt;
&lt;li&gt;Choose Next.&lt;/li&gt;
&lt;li&gt;Review the stage that API Gateway creates for you, then choose Next.&lt;/li&gt;
&lt;li&gt;Choose Create.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Create Routes&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Visit &lt;a href="https://console.aws.amazon.com/apigateway" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/apigateway&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Choose your API.&lt;/li&gt;
&lt;li&gt;Choose Routes.&lt;/li&gt;
&lt;li&gt;Choose Create.&lt;/li&gt;
&lt;li&gt;For Method, choose GET.&lt;/li&gt;
&lt;li&gt;For the path, enter /items/{id}. &lt;/li&gt;
&lt;li&gt;Choose Create.&lt;/li&gt;
&lt;li&gt;Now you need to repeat steps 4-7 for GET /items, DELETE /items/{id}, and PUT /items.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Create Integration&lt;/strong&gt;&lt;br&gt;
This is used to connect a route to backend resources. &lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Visit &lt;a href="https://console.aws.amazon.com/apigateway" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/apigateway&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Choose your API.&lt;/li&gt;
&lt;li&gt;Choose Integrations.&lt;/li&gt;
&lt;li&gt;Choose Manage integrations, choose create.&lt;/li&gt;
&lt;li&gt;For Integration type, choose Lambda function.&lt;/li&gt;
&lt;li&gt;For Lambda function, enter http-demo-function.&lt;/li&gt;
&lt;li&gt;Choose Create.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Attach our integration to routes&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Visit &lt;a href="https://console.aws.amazon.com/apigateway" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/apigateway&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Choose your API.&lt;/li&gt;
&lt;li&gt;Choose Integrations.&lt;/li&gt;
&lt;li&gt;Choose a route.&lt;/li&gt;
&lt;li&gt;Under Choose an existing integration, choose the http-demo-function.
Choose Attach integration.
Repeat these steps 4-6 for all the routes we created earlier. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Testing our API&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Visit &lt;a href="https://console.aws.amazon.com/apigateway" rel="noopener noreferrer"&gt;https://console.aws.amazon.com/apigateway&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;Choose your API.&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Take note of the API's invoke URL, and copy it for later. &lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Test creating or updating an item&lt;br&gt;
&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;curl -X "PUT" -H "Content-Type: application/json" -d "{\"id\": \"123\", \"price\": 12345, \"name\": \"myitem\"}" https://abc123.execute-api.us-east-1.amazonaws.com/items

&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ul&gt;
&lt;li&gt;Test getting ALL items
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;curl https://abc123.execute-api.us-east-1.amazonaws.com/items
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;ul&gt;
&lt;li&gt;Test getting an item
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;curl https://abc123.execute-api.us-east-1.amazonaws.com/items/123
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;


&lt;p&gt;Test deleting an item&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;curl -X "DELETE" https://abc123.execute-api.us-east-1.amazonaws.com/items/123
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhbbty2grl816eiickq51.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fhbbty2grl816eiickq51.png" alt="Image description" width="800" height="714"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnhu0xwh3xurog0lxb3qb.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnhu0xwh3xurog0lxb3qb.png" alt="Image description" width="800" height="750"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
      <category>tutorial</category>
      <category>api</category>
      <category>aws</category>
    </item>
  </channel>
</rss>
