<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: gitcommitshow</title>
    <description>The latest articles on DEV Community by gitcommitshow (@gitcommitshow).</description>
    <link>https://dev.to/gitcommitshow</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F752045%2F6f90df04-b3af-4306-8deb-895b05c8dbaa.png</url>
      <title>DEV Community: gitcommitshow</title>
      <link>https://dev.to/gitcommitshow</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/gitcommitshow"/>
    <language>en</language>
    <item>
      <title>[Boost]</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Tue, 24 Mar 2026 15:15:42 +0000</pubDate>
      <link>https://dev.to/gitcommitshow/-2fb8</link>
      <guid>https://dev.to/gitcommitshow/-2fb8</guid>
      <description>&lt;div class="ltag__link--embedded"&gt;
  &lt;div class="crayons-story "&gt;
  &lt;a href="https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0" class="crayons-story__hidden-navigation-link"&gt;GitHub action to make your website analytics fail-proof and free from vendor lock-in&lt;/a&gt;


  &lt;div class="crayons-story__body crayons-story__body-full_post"&gt;
    &lt;div class="crayons-story__top"&gt;
      &lt;div class="crayons-story__meta"&gt;
        &lt;div class="crayons-story__author-pic"&gt;

          &lt;a href="/gitcommitshow" class="crayons-avatar  crayons-avatar--l  "&gt;
            &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F752045%2F6f90df04-b3af-4306-8deb-895b05c8dbaa.png" alt="gitcommitshow profile" class="crayons-avatar__image"&gt;
          &lt;/a&gt;
        &lt;/div&gt;
        &lt;div&gt;
          &lt;div&gt;
            &lt;a href="/gitcommitshow" class="crayons-story__secondary fw-medium m:hidden"&gt;
              gitcommitshow
            &lt;/a&gt;
            &lt;div class="profile-preview-card relative mb-4 s:mb-0 fw-medium hidden m:inline-block"&gt;
              
                gitcommitshow
                
              
              &lt;div id="story-author-preview-content-3395576" class="profile-preview-card__content crayons-dropdown branded-7 p-4 pt-0"&gt;
                &lt;div class="gap-4 grid"&gt;
                  &lt;div class="-mt-4"&gt;
                    &lt;a href="/gitcommitshow" class="flex"&gt;
                      &lt;span class="crayons-avatar crayons-avatar--xl mr-2 shrink-0"&gt;
                        &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F752045%2F6f90df04-b3af-4306-8deb-895b05c8dbaa.png" class="crayons-avatar__image" alt=""&gt;
                      &lt;/span&gt;
                      &lt;span class="crayons-link crayons-subtitle-2 mt-5"&gt;gitcommitshow&lt;/span&gt;
                    &lt;/a&gt;
                  &lt;/div&gt;
                  &lt;div class="print-hidden"&gt;
                    
                      Follow
                    
                  &lt;/div&gt;
                  &lt;div class="author-preview-metadata-container"&gt;&lt;/div&gt;
                &lt;/div&gt;
              &lt;/div&gt;
            &lt;/div&gt;

          &lt;/div&gt;
          &lt;a href="https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0" class="crayons-story__tertiary fs-xs"&gt;&lt;time&gt;Mar 24&lt;/time&gt;&lt;span class="time-ago-indicator-initial-placeholder"&gt;&lt;/span&gt;&lt;/a&gt;
        &lt;/div&gt;
      &lt;/div&gt;

    &lt;/div&gt;

    &lt;div class="crayons-story__indention"&gt;
      &lt;h2 class="crayons-story__title crayons-story__title-full_post"&gt;
        &lt;a href="https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0" id="article-link-3395576"&gt;
          GitHub action to make your website analytics fail-proof and free from vendor lock-in
        &lt;/a&gt;
      &lt;/h2&gt;
        &lt;div class="crayons-story__tags"&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/ai"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;ai&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/githubactions"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;githubactions&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/analytics"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;analytics&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/javascript"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;javascript&lt;/a&gt;
        &lt;/div&gt;
      &lt;div class="crayons-story__bottom"&gt;
        &lt;div class="crayons-story__details"&gt;
          &lt;a href="https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left"&gt;
            &lt;div class="multiple_reactions_aggregate"&gt;
              &lt;span class="multiple_reactions_icons_container"&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/raised-hands-74b2099fd66a39f2d7eed9305ee0f4553df0eb7b4f11b01b6b1b499973048fe5.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/exploding-head-daceb38d627e6ae9b730f36a1e390fca556a4289d5a41abb2c35068ad3e2c4b5.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/multi-unicorn-b44d6f8c23cdd00964192bedc38af3e82463978aa611b4365bd33a0f1f4f3e97.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
              &lt;/span&gt;
              &lt;span class="aggregate_reactions_counter"&gt;9&lt;span class="hidden s:inline"&gt; reactions&lt;/span&gt;&lt;/span&gt;
            &lt;/div&gt;
          &lt;/a&gt;
            &lt;a href="https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0#comments" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left flex items-center"&gt;
              Comments


              &lt;span class="hidden s:inline"&gt;Add Comment&lt;/span&gt;
            &lt;/a&gt;
        &lt;/div&gt;
        &lt;div class="crayons-story__save"&gt;
          &lt;small class="crayons-story__tertiary fs-xs mr-2"&gt;
            4 min read
          &lt;/small&gt;
            
              &lt;span class="bm-initial"&gt;
                

              &lt;/span&gt;
              &lt;span class="bm-success"&gt;
                

              &lt;/span&gt;
            
        &lt;/div&gt;
      &lt;/div&gt;
    &lt;/div&gt;
  &lt;/div&gt;
&lt;/div&gt;

&lt;/div&gt;


</description>
      <category>ai</category>
      <category>githubactions</category>
      <category>analytics</category>
      <category>javascript</category>
    </item>
    <item>
      <title>GitHub action to make your website analytics fail-proof and free from vendor lock-in</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Tue, 24 Mar 2026 15:06:41 +0000</pubDate>
      <link>https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0</link>
      <guid>https://dev.to/gitcommitshow/github-action-to-make-your-website-analytics-fail-proof-and-free-from-vendor-lock-in-gb0</guid>
      <description>&lt;h2&gt;
  
  
  The Problem - Analytics breaks silently
&lt;/h2&gt;

&lt;p&gt;Website analytics issues do not surface as incidents the way other bugs do. They slip through the cracks silently. You get to know about them only when you are looking at the revenue analytics dashboard and the metrics do not match your accounting books, and your gut finally says: probably I should check the analytics instrumentation code for issues.&lt;/p&gt;

&lt;p&gt;Had this check existed on every PR that changed the analytics instrumentation code, none of this would have reached the dashboard. And you wouldn’t have made bad business decisions on faulty analytics data.&lt;/p&gt;

&lt;p&gt;Let's get it done in the next 15 mins!&lt;/p&gt;

&lt;h2&gt;
  
  
  First, make your analytics free from vendor lock-in using RudderStack SDK
&lt;/h2&gt;

&lt;p&gt;Currently you might have one or more analytics SDKs (Google Analytics, Amplitude, Google Ads, etc.) in your website. This slows down your website. And because they are tightly integrated with your app as code, if you want to switch to another vendor, you'll have to import their SDK and make the changes in the code. A long error-prone process.&lt;/p&gt;

&lt;p&gt;In the next 5 mins, you'll get rid of all of them without breaking the analytics.&lt;/p&gt;

&lt;p&gt;To do that, you need one SDK to rule them all. Provided by event streaming tools such as Segment or its self-hosted alternative RudderStack.&lt;/p&gt;

&lt;p&gt;After setting it up, you will be able to collect events from your websites (source) and send it to any analytics/marketing service of your choice (destination).&lt;/p&gt;

&lt;p&gt;With that in place, if you ever need to switch to a different analytics service, it will be a quick dashboard settings change, not a code change in your website.&lt;/p&gt;

&lt;p&gt;Here’s how to replace all your analytics and marketing tool SDKs with one RudderStack SDK&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqh1smnvddlsz8j24ysux.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fqh1smnvddlsz8j24ysux.png" alt="RudderStack setup steps" width="800" height="297"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 1:&lt;/strong&gt; &lt;a href="https://app.rudderstack.com/signup?type=freetrial" rel="noopener noreferrer"&gt;Create your dashboard&lt;/a&gt; to control your data sources/destinations settings. This dashboard controls only these settings. The actual customer event data will &lt;strong&gt;not&lt;/strong&gt; flow through this service. You will either self-host the event streaming server to process customer event data i.e. the data plane or use a cloud-hosted data plane to quickly get started. Source code - &lt;a href="https://github.com/rudderlabs/rudder-server" rel="noopener noreferrer"&gt;https://github.com/rudderlabs/rudder-server&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 2:&lt;/strong&gt; Replace all your existing analytics SDKs with one &lt;a href="https://www.rudderstack.com/docs/sources/event-streams/sdks/rudderstack-javascript-sdk/" rel="noopener noreferrer"&gt;RudderStack SDK&lt;/a&gt;&lt;br&gt;
tl;dr: add these 5 lines of code in your website&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight javascript"&gt;&lt;code&gt;&lt;span class="c1"&gt;// Step 1: Install the SDK - `npm i @rudderstack/analytics-js`&lt;/span&gt;
&lt;span class="c1"&gt;// Step 2: Initalize the SDK&lt;/span&gt;
&lt;span class="k"&gt;import&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt; &lt;span class="nx"&gt;RudderAnalytics&lt;/span&gt; &lt;span class="p"&gt;}&lt;/span&gt; &lt;span class="k"&gt;from&lt;/span&gt; &lt;span class="dl"&gt;'&lt;/span&gt;&lt;span class="s1"&gt;@rudderstack/analytics-js&lt;/span&gt;&lt;span class="dl"&gt;'&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
&lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="nx"&gt;rudderAnalytics&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="k"&gt;new&lt;/span&gt; &lt;span class="nc"&gt;RudderAnalytics&lt;/span&gt;&lt;span class="p"&gt;();&lt;/span&gt;
&lt;span class="nx"&gt;rudderAnalytics&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;load&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nx"&gt;process&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nx"&gt;env&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nx"&gt;WRITE_KEY&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nx"&gt;process&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nx"&gt;env&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nx"&gt;DATA_PLANE_URL&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="p"&gt;{});&lt;/span&gt;
&lt;span class="c1"&gt;// Q: How to generate your WRITE_KEY and DATA_PLANE_URL?&lt;/span&gt;
&lt;span class="c1"&gt;// A: Create a new JavaScript source at https://app.rudderstack.com&lt;/span&gt;
&lt;span class="k"&gt;export&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt; &lt;span class="nx"&gt;rudderAnalytics&lt;/span&gt; &lt;span class="p"&gt;};&lt;/span&gt;
&lt;span class="c1"&gt;// Step 3: Call event tracking methods such as `page`, `track`, `identify`, etc. as needed&lt;/span&gt;
&lt;span class="nx"&gt;rudderAnalytics&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;page&lt;/span&gt;&lt;span class="p"&gt;();&lt;/span&gt;
&lt;span class="c1"&gt;// NOTICE: This code works only if you have set up your browser-side code to use `npm` modules. Follow the quickstart guide otherwise - https://www.rudderstack.com/docs/sources/event-streams/sdks/rudderstack-javascript-sdk/quickstart/&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;You made the change in your website code. Raised a PR, not merged yet.&lt;br&gt;
When you visit your test server link to your website, you should now see the events flowing in your RudderStack source live events dashboard.&lt;br&gt;
&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fl2fmmgi6mzr626t1rygs.jpeg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fl2fmmgi6mzr626t1rygs.jpeg" alt="Live events screenshot" width="800" height="288"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;But events are not flowing to your previous destinations yet. So let’s fix that.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Step 3:&lt;/strong&gt; Add your original analytics/marketing services as destinations in your RudderStack dashboard to restore your old analytics services. &lt;a href="https://www.rudderstack.com/integration/?type=Destination" rel="noopener noreferrer"&gt;These services&lt;/a&gt; are supported as the destinations out of the box, others can be supported either via &lt;a href="https://www.rudderstack.com/docs/destinations/webhooks/setup-guide/" rel="noopener noreferrer"&gt;webhook&lt;/a&gt; or by building your own custom integration - &lt;a href="https://github.com/rudderlabs/rudder-transformer/blob/develop/CONTRIBUTING.md#building-your-first-custom-rudderstack-source-integration" rel="noopener noreferrer"&gt;https://github.com/rudderlabs/rudder-transformer/blob/develop/CONTRIBUTING.md#building-your-first-custom-rudderstack-source-integration&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffkxzwks2lydzl959be1m.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffkxzwks2lydzl959be1m.png" alt="Source and destination connections" width="800" height="406"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Now, when you visit your test server again, you should be able to see these events in your analytics services as they were flowing earlier. The &lt;a href="https://www.rudderstack.com/docs/dashboard-guides/live-events/#destination-live-events" rel="noopener noreferrer"&gt;destination live events&lt;/a&gt; will show you everything you need at this point.&lt;br&gt;
With the analytics instrumentation done, half the battle is won.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;You are in control now, free from the analytics vendor lock-in&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;You can merge at this step, but wait!&lt;br&gt;
Before you merge the PR, let’s do something that will ensure that you do not accidentally break your analytics in future.&lt;/p&gt;

&lt;p&gt;We will add a GitHub Action to review your PRs for analytics instrumentation and data quality issues.&lt;/p&gt;
&lt;h2&gt;
  
  
  Second, make your analytics fail-proof using AI-powered GitHub workflow
&lt;/h2&gt;

&lt;p&gt;Let’s use this GitHub action - &lt;a href="https://github.com/rudderlabs/rudder-ai-reviewer" rel="noopener noreferrer"&gt;https://github.com/rudderlabs/rudder-ai-reviewer&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;With this, you will never break your analytics instrumentation and your data quality issues will never end up in production.&lt;/p&gt;

&lt;p&gt;Create &lt;code&gt;.github/workflows/rudder-ai-reviewer.yml&lt;/code&gt; in your repo:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight yaml"&gt;&lt;code&gt;&lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Rudder AI Reviewer&lt;/span&gt;
&lt;span class="na"&gt;on&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;pull_request&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;types&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="pi"&gt;[&lt;/span&gt;&lt;span class="nv"&gt;opened&lt;/span&gt;&lt;span class="pi"&gt;,&lt;/span&gt; &lt;span class="nv"&gt;synchronize&lt;/span&gt;&lt;span class="pi"&gt;]&lt;/span&gt;

&lt;span class="na"&gt;permissions&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;contents&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;read&lt;/span&gt;        &lt;span class="c1"&gt;# Required to checkout the repository&lt;/span&gt;
  &lt;span class="na"&gt;pull-requests&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;write&lt;/span&gt;  &lt;span class="c1"&gt;# Required to post review comments&lt;/span&gt;

&lt;span class="na"&gt;jobs&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
  &lt;span class="na"&gt;review&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
    &lt;span class="na"&gt;runs-on&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;ubuntu-latest&lt;/span&gt;
    &lt;span class="na"&gt;steps&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;uses&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd&lt;/span&gt; &lt;span class="c1"&gt;# v5.0.1&lt;/span&gt;

      &lt;span class="pi"&gt;-&lt;/span&gt; &lt;span class="na"&gt;name&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;Rudder AI Reviewer&lt;/span&gt;
        &lt;span class="na"&gt;uses&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;rudderlabs/rudder-ai-reviewer@v1&lt;/span&gt;
        &lt;span class="na"&gt;with&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt;
          &lt;span class="na"&gt;source-id&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;${{ secrets.RUDDERSTACK_SOURCE_ID }}&lt;/span&gt;
          &lt;span class="na"&gt;service-access-token&lt;/span&gt;&lt;span class="pi"&gt;:&lt;/span&gt; &lt;span class="s"&gt;${{ secrets.RUDDERSTACK_SERVICE_ACCESS_TOKEN }}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Commit, push, open a PR that touches any instrumentation code. The reviewer runs automatically. The action will detect the RudderStack SDK and the instrumentation changes automatically, review using AI, suggest fixes as inline review comments.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnmm0pxlacsobp4g501g9.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fnmm0pxlacsobp4g501g9.png" alt="Inline PR comment by AI PR Reviewer" width="800" height="639"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Now, you can go ahead and merge the PR if Rudder AI Reviewer does not give you any warning.&lt;/p&gt;

&lt;p&gt;In the future, you'll automatically receive reviews on PRs that impact the analytics instrumentation, data quality, and privacy compliance. You can further improve this workflow by creating a tracking plan in your RudderStack dashboard.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F90yypxmzz48l1emui7tk.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F90yypxmzz48l1emui7tk.png" alt="AI PR review summary" width="800" height="681"&gt;&lt;/a&gt;&lt;/p&gt;




&lt;p&gt;Go ahead and explore the tools I mentioned here for more details&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;a href="https://github.com/rudderlabs/rudder-server" rel="noopener noreferrer"&gt;https://github.com/rudderlabs/rudder-server&lt;/a&gt; | &lt;a href="https://www.rudderstack.com/docs/get-started/introduction/" rel="noopener noreferrer"&gt;Docs&lt;/a&gt;
&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://github.com/rudderlabs/rudder-ai-reviewer" rel="noopener noreferrer"&gt;https://github.com/rudderlabs/rudder-ai-reviewer&lt;/a&gt; | &lt;a href="https://www.rudderstack.com/docs/ai-features/rudder-ai-reviewer/" rel="noopener noreferrer"&gt;Docs&lt;/a&gt;
&lt;/li&gt;
&lt;/ul&gt;

</description>
      <category>ai</category>
      <category>githubactions</category>
      <category>analytics</category>
      <category>javascript</category>
    </item>
    <item>
      <title>Mastering Code Review skills using AI tools</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Sun, 11 Feb 2024 18:30:00 +0000</pubDate>
      <link>https://dev.to/coderabbitai/mastering-code-review-skills-using-ai-tools-1pgc</link>
      <guid>https://dev.to/coderabbitai/mastering-code-review-skills-using-ai-tools-1pgc</guid>
      <description>&lt;p&gt;The skill of conducting effective code reviews is an important skill for developers. Code reviews ensure high-quality code, facilitate knowledge sharing, and foster a collaborative team environment. Despite its importance, many tech leads and developers find themselves either spending an excessive amount of time on code reviews or struggling to provide constructive feedback to pull request (PR) authors. &lt;/p&gt;

&lt;p&gt;Conversely, some colleagues seem to navigate code reviews with ease, showcasing that code review is indeed a skill that can be honed and improved.&lt;/p&gt;

&lt;p&gt;But the question remains: How does one improve their code review skills? Is it beneficial to seek feedback from a coworker, compare your code review techniques with those of your peers, or is there an alternative strategy to enhance this critical skill?&lt;/p&gt;

&lt;p&gt;Given the specificity of PRs to individual projects, generic feedback often falls short of being genuinely helpful. While coworkers can offer valuable insights, their availability to provide immediate feedback—when the context of the PR code changes is still fresh in your mind—can be limited.&lt;/p&gt;

&lt;p&gt;The solution is “Self-Assessment”. Assess your Code Review skills and then improve your weaknesses.&lt;/p&gt;

&lt;h2&gt;
  
  
  How to Assess Code Review Skills and Improve
&lt;/h2&gt;

&lt;p&gt;Embarking on a journey to refine your code review skills, self-assessment emerges as the most effective strategy. This approach not only fosters a deeper understanding of code review practices but also encourages a mindset of continuous improvement. Here's a detailed plan to self-assess and enhance your code review capabilities:&lt;/p&gt;

&lt;h3&gt;
  
  
  1. Set a Time Limit for Reviewing New PRs
&lt;/h3&gt;

&lt;p&gt;Begin by allocating a specific time frame, such as 15 minutes, to review new PRs. This constraint encourages efficiency and helps you focus on identifying the most critical aspects of the code without getting bogged down in minutiae.&lt;/p&gt;

&lt;h3&gt;
  
  
  2. Utilize Automated AI Tools
&lt;/h3&gt;

&lt;p&gt;After completing your review, employ automated AI-based code review tools such as &lt;a href="https://coderabbit.ai/" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt;. These tools can quickly analyze code for common issues, style inconsistencies, and potential bugs, providing an immediate second opinion on the PR.&lt;/p&gt;

&lt;h3&gt;
  
  
  3. Compare and Reflect
&lt;/h3&gt;

&lt;p&gt;Evaluate the feedback from the AI tools against your own review. Did you overlook anything obvious? Did you suggest any unnecessary optimizations? Reflecting on these comparisons helps you understand your review tendencies and biases, enabling you to adjust your approach accordingly.&lt;/p&gt;

&lt;h3&gt;
  
  
  4. Repeat and Learn
&lt;/h3&gt;

&lt;p&gt;Make this self-assessment process a regular practice for new PRs. Each iteration offers new learning opportunities, gradually enhancing your ability to conduct thorough and effective code reviews.&lt;/p&gt;

&lt;h3&gt;
  
  
  5. Seek Feedback from Coworkers
&lt;/h3&gt;

&lt;p&gt;In addition to self-assessment, actively seek out code reviews from your coworkers on your own contributions. This practice not only improves your code but also provides insight into different review styles and perspectives, enriching your understanding of effective code review practices.&lt;/p&gt;

&lt;h2&gt;
  
  
  Why Self-Assessment Works
&lt;/h2&gt;

&lt;p&gt;Self-assessment in code review serves multiple purposes. It not only accelerates your learning curve by providing immediate feedback but also cultivates a culture of self-improvement and accountability. By comparing your reviews with automated feedback and reflecting on the differences, you gain insights into your review process that would be difficult to achieve through external feedback alone.&lt;/p&gt;

&lt;h2&gt;
  
  
  The Benefits of Mastering Code Review
&lt;/h2&gt;

&lt;p&gt;Mastering the art of code review carries numerous benefits for both individual developers and their teams:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Enhanced Code Quality: Effective code reviews catch issues early, reducing bugs and improving the overall quality of the codebase.&lt;/li&gt;
&lt;li&gt;Knowledge Sharing: Code reviews serve as a platform for sharing knowledge and best practices, helping team members learn from each other and grow their skills.&lt;/li&gt;
&lt;li&gt;Faster Development Cycles: By identifying and addressing issues early in the development process, teams can reduce the time spent on debugging and rework, leading to faster development cycles.&lt;/li&gt;
&lt;li&gt;Improved Team Collaboration: Regular and constructive code reviews build trust among team members, foster a culture of open communication, and encourage collaborative problem-solving.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;Improving your code review skills is a journey that requires patience, practice, and a proactive approach to learning. By embracing self-assessment, utilizing tools at your disposal, and engaging with your team, you can transform code review from a daunting task into an opportunity for growth and collaboration. Remember, the goal of code review is not just to find faults but to collectively ensure the development of high-quality, maintainable, and efficient code. As you refine your code review skills, you'll find yourself not only becoming a more effective reviewer but also contributing to the success and resilience of your development team.&lt;/p&gt;

</description>
      <category>codereview</category>
      <category>codequality</category>
      <category>softwareengineering</category>
      <category>javascript</category>
    </item>
    <item>
      <title>Boosting Engineering Efficiency Using AI Code Reviews for Remote Teams</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Fri, 09 Feb 2024 18:30:00 +0000</pubDate>
      <link>https://dev.to/coderabbitai/boosting-engineering-efficiency-using-ai-code-reviews-for-remote-teams-44a4</link>
      <guid>https://dev.to/coderabbitai/boosting-engineering-efficiency-using-ai-code-reviews-for-remote-teams-44a4</guid>
      <description>&lt;p&gt;Welcome to the future, where morning commutes involve going from bed to home office. The dress code is "business on top, pajamas down below." In this new world of remote work, tech teams worldwide are getting good at video calls and wishing for strong Wi-Fi like strong coffee. But here's the question: How do we maintain engineering efficiency when Joe from frontend is in Mexico, and DevOps Dave just started his day in Dublin? This setup presents a unique challenge: ensuring that code reviews, which are essential for code quality, are consistent, timely, and efficient. Have you ever missed a code review because it was late at night? We've all been there. Are you waiting for days to get feedback because your reviewer is in a different time zone? Oh, the frustration! Introducing the helpful algorithm: AI-driven code reviews. They're like a reliable friend who never sleeps (because they're code) and knows all the coding rules. This article explores how these intelligent bots fill the gaps in our fast-paced, sometimes slow, new world.&lt;/p&gt;

&lt;h2&gt;
  
  
  Direct Correlation: Remote Engineering Challenges &amp;amp; AI Solutions&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#direct-correlation-remote-engineering-challenges--ai-solutions" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h2&gt;

&lt;p&gt;Remember the good old days when you could easily ask a quick question to a colleague by just going to their desk? Those days are gone, just like floppy disks and dial-up internet. Nowadays, with remote work, we have traded cubicles for couches and water cooler chats for solo trips to the fridge. Let's address the three significant challenges of remote engineering and explore how AI's modern technology can provide a solution.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Communication Gaps:&lt;/strong&gt; We've all sent that "quick query" across the ocean and received a response when the moon's high in our sky. Time zones, while fantastic for the travel and holiday industry, can be the bane of a remote engineer's existence. The lag between question and answer and the lack of in-person interaction can make collaborating feel like you're screaming into a digital void.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;AI Solution:&lt;/strong&gt; AI doesn't need sleep (lucky them). They're the 24/7 store of the coding world, always open and ready to assist. Offering real-time feedback, irrespective of whether it's midday in Mumbai or twilight in Toronto, AI ensures that time zones remain a challenge only for your travel plans.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Delayed Reviews &amp;amp; Feedback Loops:&lt;/strong&gt; Here's a familiar scenario: You push code, sit back, and wait. And wait. And wait some more. Your code is in the ether, waiting for a review that's as elusive as a unicorn. The elongated feedback loops in remote settings can sometimes feel like a seemingly endless game of ping pong, where the ball... disappears.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;AI Solution:&lt;/strong&gt; Fancy a game-changer? AI provides immediate feedback. With algorithms working at the speed of computers, the waiting game is dramatically reduced. You push, AI reviews, and voila! Feedback's ready, hotter than a freshly brewed espresso.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Code Consistency &amp;amp; Quality:&lt;/strong&gt; Have you ever noticed how everyone's homemade bread looks and tastes slightly different? The same goes for code written by engineers scattered across various locales. Influenced by unique experiences and environments, each individual brings slight variances in coding style and approach.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;AI Solution:&lt;/strong&gt; Call AI the master baker, consistently churning out the perfect loaf every time. AI-driven code review tools maintain a unified standard, ensuring that whether it's Peter in Paris or Lila in Lagos, the quality and consistency of code remain top-notch.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Real-World Applications &amp;amp; Pitfalls&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#real-world-applications--pitfalls" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h2&gt;

&lt;p&gt;In a world where data is king, we enjoy hearing success stories (especially when they include pie charts!). However, there are challenges to overcome when it comes to incorporating AI code reviews in a remote environment. Let's explore the real-life stories of AI experts and the significant obstacles they have overcome.&lt;/p&gt;

&lt;h3&gt;
  
  
  Common Pitfalls &amp;amp; Solutions&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#common-pitfalls--solutions" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h3&gt;

&lt;p&gt;&lt;strong&gt;Over-reliance on AI:&lt;/strong&gt; Just as one wouldn't ask a Roomba to do a deep spring cleaning, leaning too much on AI for code reviews can miss out on the nuanced human touch. Solution: Tech Titans Inc. struck a balance by using AI for preliminary checks and human eyes for final reviews, ensuring that the code was technically sound and made logical sense in the grander scheme.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Resistance to Change:&lt;/strong&gt; Implementing new tools often meets resistance, especially if developers feel their expertise is being questioned. Startup Sensations Ltd. faced a mini rebellion of sorts. Solution: They organized workshops emphasizing AI as a tool to aid, not replace. Showcasing its strengths and limitations bridged the trust gap, smoothing the integration.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Misunderstanding AI Feedback:&lt;/strong&gt; Sometimes, AI can flag something as an error, even if it's a deliberate choice by the coder. This can lead to confusion and wasted effort trying to "fix" what isn't broken. Solution: Both companies implemented clear guidelines on understanding and acting upon AI feedback, ensuring developers knew when to accept AI feedback and when to contest it.&lt;/p&gt;

&lt;h2&gt;
  
  
  Actionable Takeaways
&lt;/h2&gt;

&lt;p&gt;Navigating the intricate maze of code reviewing can be daunting. But fret not! There are some practical steps and considerations to help steer the ship. And speaking of guiding lights, let's first mention a noteworthy tool that's caught the industry's attention.&lt;/p&gt;

&lt;h3&gt;
  
  
  CodeRabbit – An AI Code Reviewer&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#coderabbit--an-ai-code-reviewer" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h3&gt;

&lt;p&gt;Heard of CodeRabbit? It's this nifty AI tool that's gaining traction. Without all the bells and whistles – it simply reviews your code once pull requests are made. It's a straightforward, no-fuss tool designed to streamline the review process, especially for remote teams.&lt;/p&gt;

&lt;h3&gt;
  
  
  Steps for Effective Integration and Adoption&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#steps-for-effective-integration-and-adoption" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h3&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;strong&gt;Orientation:&lt;/strong&gt; It's crucial to acquaint your team with any new tool. With something like CodeRabbit, a simple hands-on session or tutorial might suffice.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Pilot Testing:&lt;/strong&gt; Test the waters first. Start with one project or a subset of your team to gauge the tool's efficiency and user-friendliness.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Constructive Feedback:&lt;/strong&gt; Encourage an open line of communication. Ensure your team provides feedback about the tool's strengths and areas needing tweaks.&lt;/li&gt;
&lt;/ol&gt;

&lt;h3&gt;
  
  
  Balancing AI Assistance with Human Touch&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#balancing-ai-assistance-with-human-touch" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h3&gt;

&lt;p&gt;No matter how advanced our tools get, there's an underlying essence of human insight that can't be entirely replicated. To ensure the balance:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;
&lt;strong&gt;Sequential Reviews:&lt;/strong&gt; Let AI, like CodeRabbit, serve as the preliminary filter. The team should do subsequent, deeper reviews to capture nuances.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Regular Updates:&lt;/strong&gt; Keep the AI tool informed. Feedback from human reviewers refines its algorithm, making it more intuitive with time.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Encourage Team Discussions:&lt;/strong&gt; After automated reviews, foster team discussions. This ensures the code isn't just machine-compliant but also logically sound and efficient from a human perspective.&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;
  
  
  Conclusion&lt;a href="https://coderabbit.ai/blog/boosting-engineering-efficiency#conclusion" rel="noopener noreferrer"&gt;​&lt;/a&gt;
&lt;/h2&gt;

&lt;p&gt;Ah, the outcome! We've ventured deep into the rabbit hole (no pun intended with CodeRabbit) of AI's transformative role in remote engineering. From the highs of streamlined code reviews to the essential human-AI harmony, it's evident that AI doesn't just knock on the doors of modern engineering—it's barging in, holding a battering ram of innovation.&lt;/p&gt;

&lt;p&gt;Remember when remote work was a cute little option, often tucked away in the "benefits" section of a job posting? Well, it's not just mainstream now; it's the modus operandi for countless teams globally. As this mode of work continues to snowball, the technological advancements, especially ones like AI-driven code reviewers, aren't merely luxury add-ons. They are becoming vital cogs in the well-oiled machine that a remote engineering team aspires to be.&lt;/p&gt;

&lt;p&gt;As we stand on the brink of yet more seismic shifts in the way we work and collaborate, it's exhilarating to think of the untapped potential of tools and technologies still on the horizon. The canvas of remote work is vast, and we've only just started splashing it with color. Here's to a brighter, more innovative, and yes, more automated future, but always with a sprinkle of irreplaceable human magic.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;Cheers to the codes that bind us, both human and binary.&lt;/em&gt;&lt;/strong&gt;&lt;/p&gt;

</description>
      <category>ratelimits</category>
      <category>openai</category>
      <category>prioritization</category>
    </item>
    <item>
      <title>FluxNinja Aperture v1.0 - Managed rate-limiting service, batteries included</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Thu, 08 Feb 2024 05:50:09 +0000</pubDate>
      <link>https://dev.to/fluxninjahq/fluxninja-aperture-v10-managed-rate-limiting-service-batteries-included-1405</link>
      <guid>https://dev.to/fluxninjahq/fluxninja-aperture-v10-managed-rate-limiting-service-batteries-included-1405</guid>
      <description>&lt;p&gt;The FluxNinja team is excited to launch “rate-limiting as a service” for developers. This is a start of a new category of essential developer tools to serve the needs of the AI-first world, which relies heavily on effective and fair usage of programmable web resources.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Try out &lt;a href="https://fluxninja.com/" rel="noopener noreferrer"&gt;FluxNinja Aperture&lt;/a&gt; for rate limiting. Join our &lt;a href="https://discord.gg/U3N3fCZEPm" rel="noopener noreferrer"&gt;community on Discord&lt;/a&gt;, appreciate your feedback.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;FluxNinja is leading this new category of “managed rate-limiting service” with the first of its kind, reliable, and battle-tested product. After its first release in 2022, FluxNinja has gone through multiple iterations based on the&lt;br&gt;
feedback from the open source community and paid customers. We are excited to bring the stable version 1.0 of the service to the public.&lt;/p&gt;

&lt;h2&gt;
  
  
  The world needs a managed rate-limiting service
&lt;/h2&gt;

&lt;p&gt;Whether you are self-hosting a service or using a managed-service, balancing the cost and performance remains a challenge. When hosting on your own, you are responsible for scaling to keep up with demand while keeping costs under control. When using a managed service, you have to comply with their request quotas while keeping usage and costs under control.&lt;/p&gt;

&lt;p&gt;This is especially true for applications that use Large Language Models (LLMs). If using cloud-based LLMs, you have to comply with their rate-limits. If using self-hosted LLMs, you have to manage the infrastructure and ensure fair usage. And given the high cost of LLMs, and the shortage of resources such as GPUs, it is crucial to ensure fair usage and cost-efficiency.&lt;/p&gt;

&lt;p&gt;To ensure fair usage and deliver a good user experience while being profitable, developers need to code and manage rate limiting and caching infrastructure. It requires significant engineering efforts and expertise.&lt;/p&gt;

&lt;p&gt;FluxNinja Aperture solves this challenge of building and managing production-grade rate-limiting by providing a managed-rate-limiting service to enforce and comply with rate-limits based on various criteria such as:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Limits based on the number of requests per second&lt;/li&gt;
&lt;li&gt;Per-user limits based on consumed tokens&lt;/li&gt;
&lt;li&gt;Limits based on subscription plans&lt;/li&gt;
&lt;li&gt;Limits based on token-bucket algorithm&lt;/li&gt;
&lt;li&gt;Limits based on concurrency&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;FluxNinja utilizes a unique approach by separating rate-limiting infrastructure from the core application, which developers don’t need to code or manage anymore. They only need to integrate Aperture SDK, and then rate limiting policies can be updated via UI or API.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;We aim to bring production-grade rate-limiting to every app&lt;/p&gt;
&lt;/blockquote&gt;

&lt;h2&gt;
  
  
  Overview of FluxNinja Aperture
&lt;/h2&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fblog.fluxninja.com%2Fassets%2Fimages%2Farchitecture_1_dark-363d8b08ad52ae4729ba3924dd213c25.svg%23gh-dark-mode-only" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fblog.fluxninja.com%2Fassets%2Fimages%2Farchitecture_1_dark-363d8b08ad52ae4729ba3924dd213c25.svg%23gh-dark-mode-only" alt="Architecture - FluxNinja Aperture"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;With FluxNinja Aperture, application developers can enforce rate-limits on the usage of their services or comply with rate-limits of various external services. This ensures reliability of your services, fair usage and cost control.&lt;/p&gt;

&lt;p&gt;FluxNinja Aperture provides a managed rate-limiting service that handles the complexities behind the scenes, requiring only simple SDK integration in your application.&lt;/p&gt;

&lt;p&gt;These are the key features of FluxNinja Aperture rate-limiting service:&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Rate &amp;amp; Concurrency Limiting&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Optimize cost and ensure fair access by implementing fine-grained rate-limits. Regulate the use of expensive pay-as-you-go APIs such as OpenAI and reduce the load on self-hosted models such as Mistral.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Caching&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Cache LLM results and reuse them for similar requests to reduce cost and boost performance.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Request Prioritization&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Manage utilization of constrained LLM resources at the level of each request by prioritizing paid over free tier users and interactive over background queries. Ensure fair access across users during peak usage hours.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Workload observability&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;Get unprecedented visibility into your workloads with detailed traffic analytics on request rates, tokens, and latencies sliced by features, users, request types, and any other arbitrary business attribute.&lt;/p&gt;

&lt;p&gt;For more info, check out &lt;a href="https://docs.fluxninja.com/" rel="noopener noreferrer"&gt;FluxNinja Aperture Docs&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Challenges with traditional rate-limiting solutions
&lt;/h2&gt;

&lt;p&gt;Traditional approaches to rate-limiting, typically involving custom-built solutions with in-memory data stores such as Redis, have presented significant&lt;br&gt;
challenges.&lt;/p&gt;

&lt;p&gt;Managing the codebase and infrastructure for rate-limiting demands regular attention from engineers and DevOps, incurring significant costs.&lt;/p&gt;

&lt;p&gt;API gateways work for limited use cases; they lack the context-specific understanding required for business aware rate-limiting (e.g., per-user limits or subscription-based restrictions).&lt;/p&gt;

&lt;p&gt;There is currently no ready-made solution for cases where a distributed application needs to comply with the rate-limits of an external service.&lt;/p&gt;

&lt;p&gt;These limitations highlight the need for a more efficient, context-aware, and easy-to-manage rate-limiting solution suitable for modern application demands.&lt;/p&gt;

&lt;h2&gt;
  
  
  How FluxNinja Aperture solves these gaps
&lt;/h2&gt;

&lt;p&gt;Aperture separates rate-limiting infrastructure from the application code. You can self-host it using the Aperture open source package or use the hosted solution - Aperture Cloud. To manage rate-limits, you only need to integrate Aperture SDKs in your programming language.&lt;/p&gt;

&lt;p&gt;Benefits compared to custom Redis-based or makeshift solutions:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;No need to code and manage complex rate-limiting algorithms and infrastructure&lt;/li&gt;
&lt;li&gt;Rate-limit policies and algorithms are updated centrally via UI or API rather than application code changes&lt;/li&gt;
&lt;li&gt;Real-time analytics dashboards to monitor and tune configurations&lt;/li&gt;
&lt;/ul&gt;

&lt;blockquote&gt;
&lt;p&gt;With FluxNinja Aperture, the heavy lifting is offloaded, allowing you to focus&lt;br&gt;
on business logic while still retaining control over policies.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;FluxNinja Aperture also integrates with existing service mesh and API gateways, giving a quick upgrade to your existing rate-limiting infrastructure.&lt;/p&gt;

&lt;p&gt;You can easily configure these constraints using Aperture policies. And then wrap your code block with Aperture SDK calls where you use these external or internal services. Using the Aperture Cloud UI, you’ll be able to monitor the&lt;br&gt;
workload and effectiveness of rate-limit policies.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fblog.fluxninja.com%2Fassets%2Fimages%2Fmonitoring-5b68575641e3007f078fb3a8ac4c1624.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fblog.fluxninja.com%2Fassets%2Fimages%2Fmonitoring-5b68575641e3007f078fb3a8ac4c1624.png" alt="Screenshot - Monitoring Feature"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Check out &lt;a href="https://docs.fluxninja.com/get-started/" rel="noopener noreferrer"&gt;this example&lt;/a&gt; to get started with enforcing or complying with rate-limits using FluxNinja Aperture.&lt;/p&gt;

&lt;h2&gt;
  
  
  Customer case study
&lt;/h2&gt;

&lt;p&gt;CodeRabbit is a leading AI Code Review tool and they are an early adopter of FluxNinja Aperture. The CodeRabbit app consumes several LLM APIs. They offer&lt;br&gt;
code review services through various subscription tiers to their users, including a free trial and an unlimited plan for open source projects. The high cost of LLM services and huge demand for their own service made it a challenge&lt;br&gt;
to offer an accessible pricing for their users while being cost-efficient. CodeRabbit uses FluxNinja Aperture to prioritize, cache, and rate-limit requests&lt;br&gt;
based on user tier preference and time criticality. FluxNinja helps them &lt;a href="https://blog.coderabbit.ai/blog/how-we-built-cost-effective-generative-ai-application" rel="noopener noreferrer"&gt;deliver a great user experience while being cost-efficient&lt;/a&gt;.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion (tl;dr)
&lt;/h2&gt;

&lt;p&gt;Rate limiting is crucial for web services, especially for those using Generative AI, to ensure fair usage, cost-efficiency, and a better user experience. Traditional methods often require heavy engineering work and struggle to address&lt;br&gt;
more nuanced needs such as user-specific or token-based limits.&lt;/p&gt;

&lt;p&gt;FluxNinja Aperture solves this by providing an SDK-driven managed-rate-limiting service, making it easy to enforce your own rate-limits and comply with rate limits of the services you use. With FluxNinja Aperture, teams do not need to&lt;br&gt;
invest their engineering bandwidth in building and maintaining complex rate limiting infrastructure. You can self-host FluxNinja Aperture on your premise or&lt;br&gt;
use the cloud offering at a nominal cost. It is as easy as integrating the FluxNinja SDK in your Node.js, Python, Golang, or Java backend apps.&lt;/p&gt;

&lt;p&gt;The FluxNinja team is excited to unveil this tool publicly for developers. Join us in this journey of bringing production-grade rate-limiting to every app.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Visit &lt;a href="https://docs.fluxninja.com/" rel="noopener noreferrer"&gt;FluxNinja Aperture docs&lt;/a&gt; to get started with enforcing or complying with rate limits now.&lt;/p&gt;
&lt;/blockquote&gt;

</description>
      <category>launch</category>
      <category>ratelimiting</category>
      <category>generativeai</category>
      <category>aiops</category>
    </item>
    <item>
      <title>Prototype to Production Roadmap for Generative AI-based Products</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Tue, 06 Feb 2024 18:30:00 +0000</pubDate>
      <link>https://dev.to/fluxninjahq/prototype-to-production-roadmap-for-generative-ai-based-products-1idh</link>
      <guid>https://dev.to/fluxninjahq/prototype-to-production-roadmap-for-generative-ai-based-products-1idh</guid>
      <description>&lt;blockquote&gt;
&lt;p&gt;As we enter 2024, Generative AI-based applications are poised to become&lt;br&gt;
mainstream&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Given Generative AI’s limitations at the start of 2023, the world was skeptical whether Generative AI would deliver tangible value to the businesses and to the customers. With the current state of Generative AI services, it seems totally possible. Many of us have by now built some prototypes of Generative AI-based apps that are effectively solving specific business problems and delivering concrete value to a small set of users.&lt;/p&gt;

&lt;p&gt;This was possible due to continuous improvements in Generative AI services from GPT-3.5 to GPT-4-Turbo, from LlaMa to Mistral, and many more incremental as well as disruptive developments. We were able to confidently use Generative AI services to deliver value consistently, and the dream of building useful Generative AI-based apps is not a dream anymore but a reality.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;In 2024, we will see massive adoption of such Generative AI-based products&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;After building a prototype, the next challenge one needs to solve is how to ship those prototypes to the hands of millions of such users reliably in production. And that is not yet done by many but has been proven to be possible.&lt;/p&gt;

&lt;p&gt;A prime example of this is &lt;a href="https://coderabbit.ai/"&gt;CodeRabbit&lt;/a&gt;, a leading AI Code Review tool that utilizes GPT for &lt;strong&gt;automating PR reviews&lt;/strong&gt;.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--nwoDxdOK--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/coderabbit-gpt-usage-58a219990c4174df8cac51e07abd031c.png" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--nwoDxdOK--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/coderabbit-gpt-usage-58a219990c4174df8cac51e07abd031c.png" alt="CodeRabbit’s monthly GPT API usage for the code review use case" width="500" height="221"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;CodeRabbit was launched in Sep 2023, and has already scaled to 1 million+ monthly requests served using GPT APIs. Its 100% success in delivering those requests and a satisfied customer base of 57000+ code repositories demonstrates the practical viability of Generative AI in building scalable businesses that deliver concrete value to users.&lt;/p&gt;

&lt;p&gt;Transitioning from a prototype to a production stage is not as easy, though; it involves several challenges. These include managing operational costs, preventing service abuse, handling AI service outages, and maintaining a robust user experience while scaling to accommodate millions of users. CodeRabbit's journey exemplifies that with the right approach, these challenges can be overcome to achieve success in production.&lt;/p&gt;

&lt;p&gt;This article aims to guide you through the process of transitioning your Generative AI-based application from prototype to production. We will discuss strategies to address the common hurdles such as cost efficiency, reliability, scalability, and user experience optimization. The goal of this article is to provide a clear, technical roadmap for scaling your Generative AI application effectively.&lt;/p&gt;

&lt;h2&gt;
  
  
  Understanding foundational Generative AI models and services
&lt;/h2&gt;

&lt;p&gt;There are multiple foundational Generative AI models and services encompassing a wide range of technologies that have the capability to generate new content, solve problems, or process information in innovative ways. These services can be&lt;br&gt;
utilized to enable more specific use cases.&lt;/p&gt;

&lt;p&gt;These Generative AI models/services can be broadly categorized as follows:&lt;/p&gt;

&lt;h4&gt;
  
  
  Text generation and processing
&lt;/h4&gt;

&lt;p&gt;Leading models to generate or process text are - &lt;a href="https://openai.com/gpt-4"&gt;GPT-4&lt;/a&gt;, &lt;a href="https://mistral.ai/"&gt;Mistral&lt;/a&gt;, &lt;a href="https://www.anthropic.com/index/introducing-claude"&gt;Claude&lt;/a&gt;, &lt;a href="https://ai.meta.com/llama/"&gt;LlaMa&lt;/a&gt;, and so on. They can generate human-like text, answer questions, summarize content, translate languages, and more. Most of these models are also available as API service, so it is easier to implement those without worrying about the model deployment. But you have to do all other things related to productizing the solutions built on top of these services.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Example use cases:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automated writing assistants for grammar checking, style improvement, content generation, and so on (such as Grammarly or ProWritingAid)&lt;/li&gt;
&lt;li&gt;Automatically generate draft blog posts and articles&lt;/li&gt;
&lt;li&gt;Create conversational dialogue for chatbots and virtual assistants&lt;/li&gt;
&lt;li&gt;Generate ideas and creative story premises for writers&lt;/li&gt;
&lt;li&gt;Summarize texts and documents for consumers&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Image generation
&lt;/h4&gt;

&lt;p&gt;Image Generation models such as &lt;a href="https://www.midjourney.com/"&gt;MidJourney&lt;/a&gt;, &lt;a href="https://stability.ai/"&gt;Stable Diffusion&lt;/a&gt;, &lt;a href="https://openai.com/dall-e-2"&gt;DALL-E&lt;/a&gt;, &lt;a href="https://imagen.research.google"&gt;Imagen&lt;/a&gt;, &lt;a href="https://imagen.research.google/editor/"&gt;Imagen Editor&lt;/a&gt; etc. can create images and artworks from textual descriptions.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Example use cases:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Generate unique profile pictures and avatars&lt;/li&gt;
&lt;li&gt;Create original artwork for digital artists and designers&lt;/li&gt;
&lt;li&gt;Produce images for marketing materials and social media posts&lt;/li&gt;
&lt;li&gt;Conceptualize product designs through visualizations&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Voice generation
&lt;/h4&gt;

&lt;p&gt;There are multiple high-quality Text-to-Speech (TTS) models and services available now that convert text into spoken voice, such as&lt;br&gt;
&lt;a href="https://elevenlabs.io/"&gt;ElevanLabs&lt;/a&gt;, &lt;a href="https://coqui.ai/"&gt;XTTS/Coqui&lt;/a&gt;, &lt;a href="https://deepmind.google/discover/blog/wavenet-a-generative-model-for-raw-audio/"&gt;WaveNet&lt;/a&gt;, &lt;a href="https://aws.amazon.com/polly/"&gt;Amazon Polly&lt;/a&gt;, etc.&lt;/p&gt;

&lt;h4&gt;
  
  
  Music or Sound generation
&lt;/h4&gt;

&lt;p&gt;Some of the popular music or sound generation models/service include &lt;a href="https://audiocraft.metademolab.com/musicgen.html"&gt;MusicGen&lt;/a&gt;, &lt;a href="https://audiocraft.metademolab.com/audiogen.html"&gt;AudioGen&lt;/a&gt;, &lt;a href="https://about.fb.com/news/2023/08/audiocraft-generative-ai-for-music-and-audio/"&gt;AudioCraft&lt;/a&gt;, &lt;a href="https://openai.com/research/jukebox"&gt;Jukebox&lt;/a&gt;, &lt;a href="https://magenta.tensorflow.org/"&gt;Magenta&lt;/a&gt;, &lt;a href="https://deepmind.google/discover/blog/wavenet-a-generative-model-for-raw-audio/"&gt;WaveNet&lt;/a&gt;, etc.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Example use cases:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Compose background music for videos and other multimedia&lt;/li&gt;
&lt;li&gt;Create custom ringtones and notification sounds&lt;/li&gt;
&lt;li&gt;Produce sound effects for games, VR, and AR experiences&lt;/li&gt;
&lt;li&gt;Generate musical ideas and samples for musicians&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Video generation
&lt;/h4&gt;

&lt;p&gt;This category is not as mature as other Generative AI categories and we might have to wait a bit more for improvements to see massive amounts of practical use&lt;br&gt;
cases. Presently, some of the popular models/services to generate video from text/image are - &lt;a href="https://runwayml.com/"&gt;RunwayML&lt;/a&gt;, &lt;a href="https://replicate.com/nightmareai/cogvideo"&gt;CogVideo&lt;/a&gt;, &lt;a href="https://imagen.research.google/video/"&gt;Imagen&lt;/a&gt;, &lt;a href="https://makeavideo.studio/"&gt;Make-a-Video&lt;/a&gt;, &lt;a href="https://phenaki.video/"&gt;Phenaki&lt;/a&gt;, &lt;a href="https://www.synthesia.io/"&gt;Synthesia&lt;/a&gt;, &lt;a href="https://stability.ai/stable-video"&gt;Stable Video&lt;/a&gt;, &lt;a href="https://blog.research.google/2023/12/videopoet-large-language-model-for-zero.html"&gt;VideoPoet&lt;/a&gt;,&lt;br&gt;
and so on.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Example use cases:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automatically produce training videos for educational purposes&lt;/li&gt;
&lt;li&gt;Create visual marketing content to promote brands and offerings&lt;/li&gt;
&lt;li&gt;Generate video templates and effects for editing&lt;/li&gt;
&lt;li&gt;Conceptualize scene frameworks for filmmakers and creators&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  General purpose
&lt;/h4&gt;

&lt;p&gt;Reinforcement learning models can be optimized to complete various sequential decision-making tasks like game playing and autonomous robotic vehicle operations.&lt;br&gt;
Some examples in this category are - &lt;a href="https://www.nvidia.com/en-us/self-driving-cars/"&gt;NVIDIA DRIVE&lt;/a&gt; (self-driving solutions), &lt;a href="https://deepmind.google/discover/blog/alphazero-shedding-new-light-on-chess-shogi-and-go/"&gt;AlphaZero&lt;/a&gt; (chess game playing).&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Example use cases:&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Play games against humans by mastering gameplay strategy&lt;/li&gt;
&lt;li&gt;Control robotic systems to automate business processes&lt;/li&gt;
&lt;li&gt;Optimize machine behaviors for complex sequential tasks&lt;/li&gt;
&lt;li&gt;Develop product innovations through iterative simulated testing&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Future categories
&lt;/h4&gt;

&lt;p&gt;In 2024, we might see some more categories as more foundational models are created that are optimized for a specific task or industry, enabling more use&lt;br&gt;
cases. Some of the new categories we predict will emerge in 2024 are related to data analysis, predictions, gaming, industrial automation, autonomous&lt;br&gt;
vehicles, healthcare diagnostics, adaptive learning, and explainable AI (XAI).&lt;/p&gt;

&lt;p&gt;You or your competitors might have built a prototype based on these foundational models or services already. If not, it is likely that you’ll do that in 2024.&lt;br&gt;
However, how do you move beyond the prototype and make it available in production to real users, and that too at a practical scale which drives significant impact?&lt;/p&gt;

&lt;h2&gt;
  
  
  Path from prototype to production
&lt;/h2&gt;

&lt;p&gt;Drawing on my personal experience and the insights gained from others, let me share the specific steps to take your Generative AI-based solution from prototype to production. I'll also provide tips for each step to take specific actions in your journey to productionize your Generative AI-based application.&lt;/p&gt;

&lt;h3&gt;
  
  
  1. Choose the right Generative AI model or service for the task
&lt;/h3&gt;

&lt;p&gt;Start with the basic understanding of various models that you can use and how they fit your requirements. The earlier section might have provided you a high-level overview. To move to the next step of choosing the right model or&lt;br&gt;
service, explore some of the popular comparison/benchmarks for Generative AI models such as:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;a href="https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard"&gt;Chatbot Arena Leaderboard&lt;/a&gt; provides comparison of various LLMs based on various benchmarks. Their &lt;a href="https://arena.lmsys.org/"&gt;official website&lt;/a&gt; provides more utilities for comparison.&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard"&gt;Open LLM Leaderboard&lt;/a&gt; provides comparison of Open Source LLMs based on various benchmarks.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;While these general benchmarks can help with high level filtering of the models/services you might want to use, one must test these models against their full application requirements using a large enough sample to prove production&lt;br&gt;
readiness. Assess accuracy, relevance, runtime, and other performance metrics for your use case.&lt;/p&gt;

&lt;h3&gt;
  
  
  2. Manage prompt engineering effectively
&lt;/h3&gt;

&lt;p&gt;To effectively use a Generative AI model/service, you need to provide and iterate on prompts. Your service quality depends on it. Which is why managing your prompts for the AI services is crucial in production. Following techniques&lt;br&gt;
can help:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Curate a library of tested base prompts - Start by gathering prompts used during prototyping that yield high quality, relevant outputs in your domain. These can serve as standard building blocks.&lt;/li&gt;
&lt;li&gt;Log all prompts and iterations - Track all prompts and model versions in your production systems, along with key metric scores. Analyze for continuous refinement.&lt;/li&gt;
&lt;li&gt;Implement prompt templating conventions - Structure prompts into clear components like task framing, content constraints, tone/style parameters, etc. to simplify iteration.&lt;/li&gt;
&lt;li&gt;Build a prompt enrichment pipeline - Augment prompts with external data like lexicons, knowledge bases, and human feedback to improve them over time.&lt;/li&gt;
&lt;li&gt;Control variations with conditional parameters - For user personalization or experimentation, rely more on conditional tuning of style, length, etc. rather than fully custom prompts.&lt;/li&gt;
&lt;li&gt;Allow spaces for innovation - Leave room within composable prompt templates to keep introducing and testing new creative variants.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  3. Monitor quality and mitigate hallucinations
&lt;/h3&gt;

&lt;p&gt;Once you see that your solution is working on your local or for some users, you must not stop there, you still need to think about quality control via monitoring and specifically to manage hallucinations. Some of the following&lt;br&gt;
techniques can help.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automate testing process to identify issues early - You can &lt;strong&gt;use LLM as an evaluation tool&lt;/strong&gt; to automate some of the tasks in the testing process. Run those tests before releasing the new version to customers.&lt;/li&gt;
&lt;li&gt;Set up monitoring for model drift - As data patterns/distributions shift over time, monitor drops in prompt effectiveness and update appropriately.&lt;/li&gt;
&lt;li&gt;Check outputs for consistency - Spot check generations directly for coherence, factuality, toxicity to catch model performance regressions requiring prompt tuning.&lt;/li&gt;
&lt;li&gt;Controlled beta access - We recommend releasing controlled beta access to test the product quality and identify hallucinations. It ensures the application hits quality requirements, establishes a clear user agreement, and protects key reputation aspects during closed beta with selective users.&lt;/li&gt;
&lt;li&gt;Human review as the last defending line - You should also have a human review step for some of the different test cases before deploying the release to customers.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  4. Wrap as a production API/service
&lt;/h3&gt;

&lt;p&gt;Expose core functionality via REST or other APIs for easy integration into the application front-end. Add input validation, authentication, monitoring, etc.&lt;/p&gt;

&lt;h3&gt;
  
  
  5. Establish scalable infrastructure
&lt;/h3&gt;

&lt;p&gt;Standard generative models have significant system resource demands. Even when you’re using the services which are taking care of those scalable infrastructure needs (e.g. GPT API), the number of requests you’ll be sending to those services&lt;br&gt;
will be quite high and that will require you to think about your infrastructure.&lt;br&gt;
Assess expected request loads and build a distributed cloud infrastructure for cost-efficient scalability. You will likely need to containerize using&lt;br&gt;
Docker/Kubernetes and set up auto-scaling.&lt;/p&gt;

&lt;h3&gt;
  
  
  6. Setup rate limiting for cost optimization and service abuse protection
&lt;/h3&gt;

&lt;p&gt;When it comes to load management, the requirements for Generative AI-based applications are way higher than your usual systems. The amount of service load is humongous as your system is talking to an external Generative AI service at a frequency which your normal app usually wouldn't need. You and your customers will frequently find yourselves dealing with errors such as “429 - Too many requests” and CPU usage going close to 100%. Avoiding these issues and meeting the requirement of low latency is critical to retaining your customers.&lt;/p&gt;

&lt;p&gt;It is not easy, and making your application accessible from a service availability or cost perspective is fundamental to growing adoption for your product. If you use the right tools, rate limiting and caching can be easy pickings in ensuring your service does not go out of service or become&lt;br&gt;
unsustainably expensive to manage.&lt;/p&gt;

&lt;p&gt;Tools such as &lt;a href="https://fluxninja.com"&gt;FluxNinja Aperture&lt;/a&gt; can be helpful, which are purposefully designed to protect Generative AI-based applications from abuse&lt;br&gt;
and high cost.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--lhHExiip--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/aperture-architecture-363d8b08ad52ae4729ba3924dd213c25.svg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--lhHExiip--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/aperture-architecture-363d8b08ad52ae4729ba3924dd213c25.svg" alt="FluxNinja Aperture architecture" width="800" height="428"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;As shown in the above architecture diagram, FluxNinja Aperture or your own custom solution for rate limiting needs to sit in between your app’s backend and&lt;br&gt;
Generative AI service to take care of:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Rate limiting request based on the quota for your Generative AI service&lt;/li&gt;
&lt;li&gt;Queuing requests to avoid overwhelming your system or the external Generative AI service system you use&lt;/li&gt;
&lt;li&gt;Prioritizing requests based on user tiers&lt;/li&gt;
&lt;li&gt;Caching requests to reduce external AI service costs and deliver results to users faster&lt;/li&gt;
&lt;li&gt;Monitor your service health and how you’re interacting with the external AI service&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Using FluxNinja, all these steps can be done with a simple SDK integration of FluxNinja in your code. It can provide you protection from abuse as well as control your costs.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--swuWaJ1P--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/api-producer-vs-consumer-needs-ea3968e22e9e33bcc1203aca559594fe.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--swuWaJ1P--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://blog.fluxninja.com/assets/images/api-producer-vs-consumer-needs-ea3968e22e9e33bcc1203aca559594fe.jpg" alt="Two key needs to improve user experience for Generative AI app" width="800" height="355"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In the end, this step will result in a better user experience and boost your app’s growth.&lt;/p&gt;

&lt;p&gt;Treat these 6 steps as a checklist for going from prototype to production.&lt;br&gt;
Appreciate your thoughts and tips to make this checklist even better.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;The Generative AI-based application market is growing exponentially. There is an opportunity to use this to grow and stay ahead of your competition. Although it is not an easy task to take your Generative AI-based product from prototype to production, it is totally possible to achieve it, similar to &lt;a href="https://blog.fluxninja.com/blog/coderabbit-cost-effective-generative-ai"&gt;how CodeRabbit built a cost-effective Generative AI-based app&lt;/a&gt;. And you can make it easier by approaching it in an organized manner and utilizing the right tools, as we discussed in this article. Some of the key points to remember are - choose the appropriate model, manage prompts effectively, use testing techniques specific to Generative AI-based apps, implement rate limiting and caching.&lt;/p&gt;

</description>
      <category>ai</category>
      <category>aiops</category>
      <category>ratelimiting</category>
      <category>tutorial</category>
    </item>
    <item>
      <title>Modern AI stack for developer productivity</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Fri, 05 Jan 2024 00:00:00 +0000</pubDate>
      <link>https://dev.to/coderabbitai/modern-ai-stack-for-developer-productivity-33kl</link>
      <guid>https://dev.to/coderabbitai/modern-ai-stack-for-developer-productivity-33kl</guid>
      <description>&lt;p&gt;The 'modern AI stack for developer productivity' refers to a comprehensive set of AI-powered developer tools that improve developer productivity in building software. In 2023, Large Language Models (LLMs) caused significant disruption, leading to a rapid increase in the adoption of artificial intelligence within the development lifecycle, particularly in the realm of 'developer productivity tools'. A significant majority of software development projects are now leveraging some form of AI, specifically Generative AI to transform traditional development workflows into more intelligent, efficient, and automated processes.&lt;/p&gt;

&lt;p&gt;The modern AI stack for developer productivity is reshaping the landscape of software development, making tasks that were once time-consuming or complex more manageable and automated. From helping with the research or code writing to reviewing code and ensuring quality, the modern AI stack is a testament to how AI is not just an add-on but an integral component in the software development process.&lt;/p&gt;

&lt;p&gt;Are you leveraging the full potential of the modern AI tech stack in your projects? This article might help you to get that perspective needed to understand how it might elevate your work to the next level.&lt;/p&gt;

&lt;h2&gt;
  
  
  Three Pillars of the &lt;strong&gt;Modern AI Stack for Developer Productivity&lt;/strong&gt;
&lt;/h2&gt;

&lt;p&gt;There are three key components in the modern AI stack for developer productivity that are useful in different stages of the development lifecycle. These three stages are the research or knowledge gathering stage, the coding stage, and the final code review stage. Let’s discuss each of these stages in detail and how AI tools can help improve developer productivity in each.&lt;/p&gt;

&lt;h3&gt;
  
  
  Knowledge
&lt;/h3&gt;

&lt;p&gt;The Knowledge pillar is central to the modern AI stack. It involves AI systems helping developers gather and synthesize knowledge, usually in the form of a chat or question-and-answer session. A prime example in this space is &lt;a href="https://chat.openai.com/" rel="noopener noreferrer"&gt;ChatGPT&lt;/a&gt;.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;a href="https://chat.openai.com/" rel="noopener noreferrer"&gt;ChatGPT&lt;/a&gt; is the leading AI assistant to quickly answer developers' questions on syntax, frameworks, debugging, etc.&lt;/li&gt;
&lt;li&gt;It acts like a supercharged search engine, saving developers time from having to dig through documentation or StackOverflow.&lt;/li&gt;
&lt;li&gt;ChatGPT can also explain concepts, provide code examples and suggestions, and identify knowledge gaps. Over time, these models will get better at technical reasoning with more training data.&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://stackoverflow.co/labs/search/" rel="noopener noreferrer"&gt;StackOverflow Community Search&lt;/a&gt; is another product in this category which instantly summarizes the solution.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;This transformation is crucial in development environments where quick access to information and rapid problem-solving are essential.&lt;/p&gt;

&lt;h4&gt;
  
  
  Challenges
&lt;/h4&gt;

&lt;p&gt;One of the main challenges is ensuring the accuracy and reliability of the answers. AI systems might sometimes generate plausible but incorrect or biased responses.&lt;/p&gt;

&lt;h3&gt;
  
  
  Code Generation
&lt;/h3&gt;

&lt;p&gt;Code generation through AI marks a significant leap in software development. AI models, trained on vast code repositories, can now assist in generating code snippets and at times the entire modules. This speeds up the coding process. The evolution of this pillar is a testament to AI's growing understanding of programming languages and logic, offering a collaborative tool that augments the developer's capabilities rather than replacing them.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;AI models like OpenAI’s GPT-4 Code Interpreter are leading this segment.&lt;/li&gt;
&lt;li&gt;They aid in writing code, offering suggestions, and even generating entire code blocks based on user input.&lt;/li&gt;
&lt;li&gt;They are particularly beneficial in increasing development speed and making coding more accessible to non-experts.&lt;/li&gt;
&lt;li&gt;
&lt;a href="https://github.com/features/copilot" rel="noopener noreferrer"&gt;GitHub Copilot&lt;/a&gt; introduces this experience in the IDE (such as VS Code) where you code. It enhances coding efficiency by rapidly suggesting code blocks and functions directly within the editor. This helps developers generate boilerplate code, complete repetitive tasks and implement common patterns much faster.&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Challenges
&lt;/h4&gt;

&lt;p&gt;The limitations include dependency on the training data, which may not always represent the most efficient or modern coding practices. Ethically, there are concerns about code originality and the potential for inadvertently generating vulnerable or buggy code.&lt;/p&gt;

&lt;h3&gt;
  
  
  Code Review
&lt;/h3&gt;

&lt;p&gt;AI’s role in code review is about ensuring quality, compliance, and optimization. Unlike traditional code reviews, which are time-consuming and prone to human oversight, AI-driven code reviews are swift and more thorough. AI models can scan code for patterns, anomalies, and compliance with coding standards, offering insights and suggestions for improvements. This pillar has evolved from basic syntax checking to sophisticated analysis, significantly enhancing the code quality.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Automated code review tools, like &lt;a href="https://coderabbit.ai/" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt;, help in identifying bugs, evaluating whether the PR achieves its objectives, and ensuring adherence to coding standards. The in-line comments make it easier to use and put things in motion.&lt;/li&gt;
&lt;li&gt;These tools can analyze code more thoroughly and quickly than human reviewers, leading to higher quality software. This frees up developer time as well as improves code quality before reaching production.&lt;/li&gt;
&lt;li&gt;Over time, CodeRabbit could fine-tune to a team's specific code review checklist and feedback provided in comments to provide even more accurate suggestions and extend this access to organization knowledge via code reviews naturally.&lt;/li&gt;
&lt;/ul&gt;

&lt;h4&gt;
  
  
  Challenges
&lt;/h4&gt;

&lt;p&gt;If there is not enough information about the requirements in the issues, the PR assessment against the requirement might not provide the accurate picture as you would expect.&lt;/p&gt;

&lt;h2&gt;
  
  
  Prioritize knowledge and review over generation
&lt;/h2&gt;

&lt;p&gt;While most people would be attracted by the promises code generation offers, I believe it will not have as big an impact on developer productivity as the other two - Knowledge and Code Review.&lt;/p&gt;

&lt;p&gt;Code Generation tools may save some time in writing standard code, understanding and fine-tuning the output remains crucial. But the risk of overreliance on AI for code generation can lead to code inaccuracies and legal issues with AI-generated code. The real productivity gains come from improving organizational knowledge and code review process to ensure high standards of code quality.&lt;/p&gt;

&lt;p&gt;As &lt;a href="https://stackoverflow.blog/2023/12/29/the-hardest-part-of-building-software-is-not-coding-its-requirements/" rel="noopener noreferrer"&gt;StackOverflow rightly mentioned&lt;/a&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;The hardest part of building software is not coding, it is requirements&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Software is more than just code; it's about meeting users' needs. The Knowledge and Code Review pillars tightly align with this goal, which is why I urge you to prioritize Knowledge and Code Review tools in your modern AI stack.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;The integration of these three pillars - Knowledge, Code Generation, and Code Review - forms a robust foundation in the AI-driven development process. Each pillar complements the others, creating a synergistic environment where developers are empowered with advanced tools and insights, leading to more efficient, innovative, and error-free software development.&lt;/p&gt;

</description>
      <category>developerproductivit</category>
      <category>developertools</category>
      <category>aitools</category>
      <category>aidevelopertools</category>
    </item>
    <item>
      <title>How we managed GPT-4 API cost at scale</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Fri, 29 Dec 2023 20:55:36 +0000</pubDate>
      <link>https://dev.to/coderabbitai/how-we-managed-gpt-4-api-cost-at-scale-2893</link>
      <guid>https://dev.to/coderabbitai/how-we-managed-gpt-4-api-cost-at-scale-2893</guid>
      <description>&lt;p&gt;Since its inception, &lt;a href="http://coderabbit.ai/" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt; has experienced steady growth in its user base, comprising developers and organizations. Installed on thousands of repositories, CodeRabbit reviews several thousand pull requests (PRs) daily. &lt;/p&gt;

&lt;p&gt;In this blog post, we will explore how CodeRabbit managed to deliver continuous, in-depth code analysis cost-effectively, while also providing a robust, free plan to Open Source projects.&lt;/p&gt;

&lt;h2&gt;
  
  
  Overview of CodeRabbit's Product and LLM Usage
&lt;/h2&gt;

&lt;p&gt;CodeRabbit is an AI-first PR Review tool that uses GPT APIs for various functionalities. CodeRabbit offers the following tiers of service:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;CodeRabbit Pro: A paid service providing in-depth code reviews for private repositories. It's priced according to the number of developers, starting with a full-featured 7-day free trial.&lt;/li&gt;
&lt;li&gt;CodeRabbit for Open Source: A free service offering in-depth code reviews for open source (public) repositories.&lt;/li&gt;
&lt;li&gt;CodeRabbit Free: A free plan for private repositories, providing summarization of code changes in a PR.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Our vision is to offer an affordable, AI-driven code review service to developers and organizations of all sizes while supporting the open source community. We are particularly mindful of open source projects, understanding the challenges in reviewing community contributions. Our goal is to reduce the burden of code reviews for open source maintainers by improving submission quality before the review process begins.&lt;/p&gt;

&lt;p&gt;CodeRabbit's review process is automatically triggered when a PR is opened in GitHub or GitLab. Each review involves a complex workflow that builds context and reviews each file using large language models (LLMs). Code review is a complex task that requires an in-depth understanding of the changes and the existing codebase. High-quality review comments necessitate state-of-the-art language models such as gpt-4. However, these models are significantly more expensive than simpler models, as shown by the &lt;a href="https://openai.com/pricing" rel="noopener noreferrer"&gt;10x-30x price difference&lt;/a&gt; between gpt-3.5-turbo and gpt-4 models.&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Model&lt;/th&gt;
&lt;th&gt;Context Size&lt;/th&gt;
&lt;th&gt;Cost per 1k Input Tokens&lt;/th&gt;
&lt;th&gt;Cost per 1k Output Tokens&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;GPT-4&lt;/td&gt;
&lt;td&gt;Up to 32k&lt;/td&gt;
&lt;td&gt;$0.06&lt;/td&gt;
&lt;td&gt;$0.12&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;GPT-4&lt;/td&gt;
&lt;td&gt;Up to 8k&lt;/td&gt;
&lt;td&gt;$0.03&lt;/td&gt;
&lt;td&gt;$0.06&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;GPT-3.5 Turbo&lt;/td&gt;
&lt;td&gt;Up to 16k&lt;/td&gt;
&lt;td&gt;$0.003&lt;/td&gt;
&lt;td&gt;$0.004&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;GPT-3.5 Turbo&lt;/td&gt;
&lt;td&gt;Up to 4k&lt;/td&gt;
&lt;td&gt;$0.0015&lt;/td&gt;
&lt;td&gt;$0.002&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;blockquote&gt;
&lt;p&gt;gpt-4 model is 10-30x more expensive than gpt-3.5-turbo model&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;Our primary cost driver is using OpenAI's API to generate code review comments. We will share our cost optimization strategies in the following sections. Without these optimizations, our free offering to open source projects would not be feasible.&lt;/p&gt;

&lt;p&gt;Let's take a look at the strategies that helped us optimize the cost and improve user experience.&lt;/p&gt;




&lt;h2&gt;
  
  
  1. Dual-models: Summarize &amp;amp; Triage Using Simpler Models
&lt;/h2&gt;

&lt;p&gt;For less complex tasks such as summarizing code diffs, simpler models such as gpt-3.5-turbo are adequate. As an initial optimization, we use a mix of models, as detailed in &lt;a href="https://coderabbit.ai/blog/coderabbit-openai-rate-limits" rel="noopener noreferrer"&gt;our earlier blog post&lt;/a&gt;. We use gpt-3.5-turbo to compress large code diffs into concise summaries, which are then processed by gpt-4 for reviewing each file. This dual-model approach significantly reduces costs and enhances review quality, enabling us to manage PRs with numerous files and extensive code differences.&lt;/p&gt;

&lt;p&gt;Additionally, we implemented triage logic to skip trivial changes from the review process. We use the simpler model to classify each diff as either trivial or complex, as part of the same prompt used for code diff summarization. Low-risk changes such as documentation updates, variable renames, and so on, are thus excluded from the thorough review process. This strategy has proven effective, as simpler models can accurately identify trivial changes.&lt;/p&gt;

&lt;p&gt;By using this dual-model approach for summarization and filtering out trivial changes, we save almost 50% on costs.&lt;/p&gt;

&lt;h2&gt;
  
  
  2. Rate-limiting: Enforcing Fair Usage
&lt;/h2&gt;

&lt;p&gt;Upon launching our free service for open source projects, we noticed individual developers using it as a coding co-pilot by making hundreds of incremental commits for continuous feedback. CodeRabbit, designed for thorough code reviews unlike tools such as GitHub Copilot, incurs high costs when used in this manner. Therefore, we implemented hourly rate-limits on the number of files and commits reviewed per user, to control excessive usage without compromising user experience. These limits vary across different product tiers. For example, we set more aggressive limits for open source users compared to trial and paid users.&lt;/p&gt;

&lt;p&gt;To implement these rate-limits, we evaluated various options for Serverless environments. We opted for &lt;a href="https://fluxninja.com/" rel="noopener noreferrer"&gt;FluxNinja Aperture&lt;/a&gt; for its simplicity and policy sophistication. We were already using Aperture for managing &lt;a href="https://coderabbit.ai/blog/coderabbit-openai-rate-limits" rel="noopener noreferrer"&gt;OpenAI rate limits&lt;/a&gt;, making it a natural choice for our rate-limiting needs as well.&lt;/p&gt;

&lt;p&gt;In FluxNinja Aperture, policies are decoupled from application logic through labels, enabling new policy additions without altering application code. We apply labels in FluxNinja Aperture, wrap the review workload with its SDK, and write policies that enforce limits on those labels. For example, we enforce a 3 reviews per hour limit (1 review every 20 minutes) for open source users, allowing a burst of 2 back-to-back reviews, as shown in the screenshots below.&lt;/p&gt;

&lt;p&gt;Integration with FluxNinja Aperture SDK&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fv4qqzoga5f40gtdiau8c.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fv4qqzoga5f40gtdiau8c.png" title="Rate limiting commits per hour for open source users" alt="Rate limiting commits per hour for open source users" width="800" height="219"&gt;&lt;/a&gt;&lt;br&gt;
 Rate limiting commits per hour for open source users&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftq2fzxiij5i2lie57djh.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ftq2fzxiij5i2lie57djh.png" title="Wait time feedback to the user in a comment" alt="Wait time feedback to the user in a comment" width="800" height="304"&gt;&lt;/a&gt;&lt;br&gt;
 Wait time feedback to the user in a comment&lt;/p&gt;

&lt;p&gt;Given the high cost and capacity constraints of state-of-the-art models such as gpt-4, rate-limiting is an essential requirement for any AI application. By implementing fair-usage rate limits, we are saving almost 20% on our costs.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjigfqk0nonxszj1q52ti.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fjigfqk0nonxszj1q52ti.png" title="image_tRate limit metrics for open source usersooltip" alt="Rate limit metrics for open source users" width="800" height="211"&gt;&lt;/a&gt;&lt;br&gt;
 Rate limit metrics for open source users&lt;/p&gt;

&lt;h2&gt;
  
  
  3. Caching: Avoid Re-generating Similar Review Comments
&lt;/h2&gt;

&lt;p&gt;We believe that building user habits around AI involves seamlessly augmenting existing workflows. Therefore, AI code reviews must be continuous: they should trigger as soon as a PR is opened and incrementally update the summary and generate review comments as more commits are added.&lt;/p&gt;

&lt;p&gt;However, this approach can become expensive and generate repetitive feedback, as similar review comments are re-generated for each commit. We observed that most incremental commits involve minor adjustments or bug fixes in the initial implementation. To address this, we implemented a caching layer to avoid re-generating similar review comments for incremental commits.&lt;/p&gt;

&lt;p&gt;Fortunately, Aperture also provides a simple caching mechanism for summaries from previous commits, using the same API call where we implemented rate limits. During each incremental review, we use the simpler model for a semantic comparison of the code changes described in both summaries. If the changes are similar, we skip the review for those files to prevent re-generating similar review comments. This method differs from vector similarity-based caching techniques, as we use an LLM model for comparing summaries. Vector similarity-based approaches wouldn't be effective in our case, as the summaries require semantic comparison. We have integrated this method into the same prompt used for code diff summarization and triage.&lt;/p&gt;

&lt;p&gt;By using the more cost-effective gpt-3.5-turbo model as an advanced similarity filter before invoking the more expensive gpt-4 model for the same file, we have saved almost 20% of our costs by avoiding the generation of similar review comments.&lt;/p&gt;




&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;In this blog post, we briefly discussed how state-of-the-art LLMs such as gpt-4 can be expensive in production. We also shared our strategy of using a combination of simpler models, rate limits, and caching to optimize operational costs. We hope our experiences can assist other AI startups in optimizing their costs and developing cost-effective AI applications.&lt;/p&gt;

</description>
      <category>ratelimit</category>
      <category>ai</category>
      <category>aiops</category>
      <category>gpt</category>
    </item>
    <item>
      <title>Does this PR effectively resolve the linked issues, here's how AI can help answer this</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Mon, 18 Dec 2023 05:11:23 +0000</pubDate>
      <link>https://dev.to/gitcommitshow/ai-can-now-effectively-assess-if-a-pr-resolves-the-linked-issues-or-not-heres-how-19k8</link>
      <guid>https://dev.to/gitcommitshow/ai-can-now-effectively-assess-if-a-pr-resolves-the-linked-issues-or-not-heres-how-19k8</guid>
      <description>&lt;p&gt;In this blog, we'll explore the common challenges developers encounter in assessing PR against an issue and then we'll introduce &lt;a href="https://coderabbit.ai/blog/coderabbit-deep-dive" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt;, a leading AI-driven PR review copilot, as an automated solution to assess PRs.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Check out an example of assessing PR against the linked issues for PR &lt;a href="https://github.com/autolab/Autolab/pull/2016" rel="noopener noreferrer"&gt;autolab/Autolab/pull/2016&lt;/a&gt; (this assessment was done by &lt;a href="https://coderabbit.ai" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt; PR review bot).&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fefey3cniawdqfqji7z4b.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fefey3cniawdqfqji7z4b.jpg" alt="PR assessment demo screenshot"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Let’s deep dive and understand the topic of PR assessment&lt;/p&gt;

&lt;h2&gt;
  
  
  Assessing PR against linked issues
&lt;/h2&gt;

&lt;p&gt;For a PR reviewer and author, the most important question is:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;How effectively does this PR address the issue?&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;It is the job of us developers to evaluate how effectively the PR addresses the objectives of the issues. Although this is obvious to many of us, understanding the context of PR assessment deeply will help us make better and quicker judgements as developers.&lt;/p&gt;

&lt;p&gt;Before we dive into how, let's understand why.&lt;/p&gt;

&lt;h3&gt;
  
  
  Why do we need to assess whether a PR effectively achieves the issue objectives
&lt;/h3&gt;

&lt;p&gt;Assessing whether a Pull Request (PR) effectively addresses an issue is a fundamental aspect of PR review process for several reasons:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Ensuring correctness:&lt;/strong&gt; It's crucial for maintaining the integrity and functionality of the software.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Quality control:&lt;/strong&gt; By rigorously evaluating a PR against the issue it purports to resolve, the team can maintain high standards of quality. This process helps in identifying any potential bugs, performance issues, or unintended consequences introduced by the PR. Having the assessment written in the PR gives a good sense to the author and the peer reviewer.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Prevent unnecessary work:&lt;/strong&gt; Effective assessment ensures that time and effort are not wasted on PRs that do not fully resolve issues, preventing the need for additional fixes or rework in the future.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;User satisfaction:&lt;/strong&gt; In cases where issues are user-reported or significantly affect user experience, ensuring that a PR effectively resolves the issue is key to maintaining user trust and satisfaction.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Learning and improvement:&lt;/strong&gt; The process of assessment provides valuable learning opportunities for developers. It allows them to understand more about the system, the nature of the problem, and the effectiveness of their solution.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  What stops developers from assessing PR against an issue correctly
&lt;/h3&gt;

&lt;p&gt;There are several challenges associated with this assessment:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Time pressure:&lt;/strong&gt; Under tight deadlines, there might be a rush to get PRs merged, which can lead to insufficient assessment and oversight.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Limited context:&lt;/strong&gt; Reviewers might not always have full context or deep knowledge about every aspect of the issue, which can lead to challenges in accurately assessing the PR.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Complexity of Issues:&lt;/strong&gt; Some issues can be complex, with subtle nuances or hidden dependencies. Understanding whether a PR fully addresses such issues can be difficult.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Subjective evaluation:&lt;/strong&gt; Assessment can sometimes be subjective, especially in cases where the issue and the solution are not strictly technical but involve user experience, design, or other qualitative aspects.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Inadequate Testing:&lt;/strong&gt; In some cases, the lack of comprehensive testing can hinder the assessment. If a PR is not thoroughly tested in various scenarios, it might be difficult to ensure it completely resolves the issue.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;So what can we do about it?&lt;/p&gt;

&lt;h3&gt;
  
  
  Solution: CodeRabbit’s AI-powered assessment against linked issues
&lt;/h3&gt;

&lt;p&gt;CodeRabbit PR review bot analyzes the issue in depth, understanding the objectives. Then based on the code diff, it analyzes whether those objectives are met or not. The result along with the explanation is shown in the PR summary or comment. The PR author or peer reviewer then quickly gets a sense of which objectives were met (marked ✅), which were not met (marked ❌), and which were unclear to make a judgment (marked ❓).&lt;/p&gt;

&lt;p&gt;See it in action:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/autolab/Autolab/pull/2016" rel="noopener noreferrer"&gt;autolab/Autolab/pull/2016&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fefey3cniawdqfqji7z4b.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fefey3cniawdqfqji7z4b.jpg" alt="PR assessment demo screenshot"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/appsmithorg/appsmith/pull/29375" rel="noopener noreferrer"&gt;appsmithorg/appsmith/pull/29375&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F7mv3c8sgpn0ytqixomlp.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F7mv3c8sgpn0ytqixomlp.jpg" alt="PR assessment demo screenshot"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/SickChill/sickchill/pull/8640" rel="noopener noreferrer"&gt;SickChill/sickchill/pull/8640&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fp329knmq3ku09ffkch8l.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fp329knmq3ku09ffkch8l.jpg" alt="PR assessment demo screenshot"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;To get this feature, just install the &lt;a href="https://coderabbit.ai/" rel="noopener noreferrer"&gt;CodeRabbit PR review app&lt;/a&gt; in your GitHub repository. Deep dive into &lt;a href="https://coderabbit.ai/blog/coderabbit-deep-dive" rel="noopener noreferrer"&gt;how CodeRabbit works&lt;/a&gt;.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;It is a fundamentally essential task for PR authors and reviewers to assess whether the PR effectively resolves the related issues or not. We discussed how this simple-looking task could be challenging due to reasons such as limited context and time pressure. &lt;a href="http://coderabbit.ai/" rel="noopener noreferrer"&gt;CodeRabbit&lt;/a&gt;’s AI-powered PR review bot solves these challenges by automatically validating linked issues against PR. &lt;/p&gt;

</description>
      <category>codereview</category>
      <category>codequality</category>
      <category>github</category>
      <category>githubactions</category>
    </item>
    <item>
      <title>AI-powered GitHub app to automatically link issues in a PR</title>
      <dc:creator>gitcommitshow</dc:creator>
      <pubDate>Sat, 09 Dec 2023 03:56:42 +0000</pubDate>
      <link>https://dev.to/gitcommitshow/ai-powered-github-app-to-automatically-link-issues-in-a-pr-4idj</link>
      <guid>https://dev.to/gitcommitshow/ai-powered-github-app-to-automatically-link-issues-in-a-pr-4idj</guid>
      <description>&lt;p&gt;It was year 2012, I was a junior developer back then. My colleague watches me copying link to a GitHub issue and sharing it in a Pull Request (PR). He comes to me and tells me that I can simply write the issue no. and it will be automatically linked. I am blown away with this tiny feature because it solves a major pet peeve I have.&lt;/p&gt;

&lt;p&gt;If we could go back in time to tell this guy that you don't even need to search for and write the issue no. and your related issues will automatically get linked in the PR, younger me would probably go uncontrollably mad with joy.&lt;/p&gt;

&lt;p&gt;In case you didn't know, it is here! We don't need to link issues in a PR now, an AI code review bot does a better job than us humans in finding the relevant issues and linking those with the PR.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;We might do a good job in linking just the one issue for which we worked on that PR but this bot can also find all those issues this PR impacts. Seemingly tiny but impactful feature.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;In this blog, we'll explore the reasons why one must link related issues to PRs. We'll highlight the common challenges developers encounter in doing so. And then we'll introduce &lt;a href="https://coderabbit.ai/blog/coderabbit-deep-dive"&gt;CodeRabbit&lt;/a&gt;, a leading AI-driven PR review copilot, as an automated solution to linking issues with PR.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Check out an example of linking related issues for &lt;a href="https://github.com/okp4/ontology/pull/220"&gt;okp4/ontology/pull/220&lt;/a&gt; (these issues were linked automatically by &lt;a href="https://coderabbit.ai"&gt;CodeRabbit&lt;/a&gt; PR review bot).&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--UDU3Ktuf--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/f0y805uggvver0f0ed2f.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--UDU3Ktuf--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/f0y805uggvver0f0ed2f.jpg" alt="Image description" width="800" height="450"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Let’s deep dive and understand the topic thoroughly.&lt;/p&gt;

&lt;h2&gt;
  
  
  Linking related issues to PR
&lt;/h2&gt;

&lt;p&gt;For developers and PR reviewers, there are two fundamental questions to consider for every PR:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;What is the key issue this PR is tackling?&lt;/li&gt;
&lt;li&gt;What are the broader implications of this PR on other existing issues?&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;And these questions are answered by identifying and connecting related issues to the PR. While this is apparent to many of us developers, let's understand in depth what issue linking helps with.&lt;/p&gt;

&lt;h3&gt;
  
  
  Why do we need to link the related issues for a PR?
&lt;/h3&gt;

&lt;p&gt;Finding related issues for a Pull Request (PR) is an essential practice with significant importance for several reasons:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Prevents redundant work:&lt;/strong&gt; When issues are not linked to PRs, developers often forget to close them, which can lead to issues remaining open even after they have been fixed. Another developer then spends time researching and developing, only to realize later that the problem was already fixed by another PR. Another common problem is that duplicate issues often get logged due to this disconnect. Linking issues to PRs prevents them from remaining open after resolution, averting duplicate efforts and confusion caused by other developers unknowingly working on these fixed issues. It streamlines the development process.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Better PR reviews:&lt;/strong&gt; When reviewers are aware of related issues, they can provide more informed and comprehensive feedback. This helps in identifying potential oversights or conflicts that might arise due to the new changes.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Better issue tracking and documentation:&lt;/strong&gt; When PRs are linked to issues, it creates a clearer historical record of why changes were made, which is valuable for future reference and new team members. If there are non technical folks on the team, they also get a good sense on what issues are now solved thanks to this PR.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Fosters collaboration:&lt;/strong&gt; When an issue is linked to a PR, team members can see the context and history of a problem, which helps with better understanding and more efficient problem-solving.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Improved code quality:&lt;/strong&gt; Understanding the wider implications of PR changes ensures that the code not only fixes a specific problem but also aligns with related issues.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  What makes it hard for developers to link related issues in PR
&lt;/h3&gt;

&lt;p&gt;Let's be honest, finding and linking these issues is neither a developer's favorite pastime nor is it as easy as it sounds. Let’s explore the challenges developers face in linking related issues:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Oversight:&lt;/strong&gt; Many times, developers are not aware of larger impacts on the code base, which leads them to miss linking all the issues the PR is addressing. This is more common where the project and the codebase are large or require a significant amount of collaboration with others.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Lack of adherence to standard practices:&lt;/strong&gt; Inconsistencies in how issues are reported, described, or tagged can make it difficult to find related issues. Lack of standard practices in issue tracking can lead to inefficiencies.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Searching in a large project takes time:&lt;/strong&gt; For large projects with extensive issue histories, manually searching and identifying related issues can be a daunting and time-consuming task.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Difficulty in identifying relationships:&lt;/strong&gt; Understanding the interdependencies between different parts of a complex codebase can be challenging.  This complexity might cause developers to overlook related issues.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Information overload:&lt;/strong&gt; In large projects, the sheer volume of issues can be overwhelming, making it difficult to sift through and identify the most relevant ones.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;
  
  
  Solution: CodeRabbit (a PR review bot) automatically finds those issues using AI
&lt;/h3&gt;

&lt;p&gt;CodeRabbit, an AI-powered PR review bot, simplifies this task by analyzing GitHub issues, performing the similarity search and linking the most relevant ones to your PR. See it in action:&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/okp4/ontology/pull/220"&gt;okp4/ontology/pull/220&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--UDU3Ktuf--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/f0y805uggvver0f0ed2f.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--UDU3Ktuf--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/f0y805uggvver0f0ed2f.jpg" alt="Image description" width="800" height="450"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/ethereum-optimism/docs/pull/261"&gt;ethereum-optimism/docs/pull/261&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--d2X0cS7---/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/0j4rdge1ilb4imez6ecs.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--d2X0cS7---/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/0j4rdge1ilb4imez6ecs.jpg" alt="Image description" width="800" height="333"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://github.com/usatie/pong/pull/111"&gt;usatie/pong/pull/111&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://res.cloudinary.com/practicaldev/image/fetch/s--cU6pUp8y--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w9lm4ao3v78y97ycjn1m.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://res.cloudinary.com/practicaldev/image/fetch/s--cU6pUp8y--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_800/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/w9lm4ao3v78y97ycjn1m.jpg" alt="Image description" width="800" height="306"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  How to automate issue linking with PR
&lt;/h2&gt;

&lt;p&gt;To get this feature, just install the &lt;a href="https://coderabbit.ai/"&gt;CodeRabbit PR review app&lt;/a&gt; in your GitHub repository. That's it. If you're curious to understand how the app works behind the scenes, &lt;a href="https://coderabbit.ai/blog/coderabbit-deep-dive"&gt;check out this CodeRabbit deep dive blog&lt;/a&gt;.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;It is an essential task for PR authors and reviewers to identify issues related to the PR. We discussed how this simple-looking task could be challenging due to many reasons including oversight and project complexity. CodeRabbit’s AI-powered PR review bot solves these challenges by automatically linking issues in the PR. &lt;/p&gt;

</description>
      <category>codereview</category>
      <category>codequality</category>
      <category>githubactions</category>
      <category>github</category>
    </item>
  </channel>
</rss>
