<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Vahid A.Nezhad</title>
    <description>The latest articles on DEV Community by Vahid A.Nezhad (@vahidoo).</description>
    <link>https://dev.to/vahidoo</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F3414601%2F0dae2c8f-fa18-4507-90fe-5bd2e1b20ab5.jpg</url>
      <title>DEV Community: Vahid A.Nezhad</title>
      <link>https://dev.to/vahidoo</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/vahidoo"/>
    <language>en</language>
    <item>
      <title>[Boost]</title>
      <dc:creator>Vahid A.Nezhad</dc:creator>
      <pubDate>Wed, 06 Aug 2025 12:24:56 +0000</pubDate>
      <link>https://dev.to/vahidoo/-3bl2</link>
      <guid>https://dev.to/vahidoo/-3bl2</guid>
      <description>&lt;div class="ltag__link--embedded"&gt;
  &lt;div class="crayons-story "&gt;
  &lt;a href="https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o" class="crayons-story__hidden-navigation-link"&gt;🔁 LLMs Get Worse at Debugging Their Own Code&lt;/a&gt;


  &lt;div class="crayons-story__body crayons-story__body-full_post"&gt;
    &lt;div class="crayons-story__top"&gt;
      &lt;div class="crayons-story__meta"&gt;
        &lt;div class="crayons-story__author-pic"&gt;

          &lt;a href="/vahidoo" class="crayons-avatar  crayons-avatar--l  "&gt;
            &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F3414601%2F0dae2c8f-fa18-4507-90fe-5bd2e1b20ab5.jpg" alt="vahidoo profile" class="crayons-avatar__image"&gt;
          &lt;/a&gt;
        &lt;/div&gt;
        &lt;div&gt;
          &lt;div&gt;
            &lt;a href="/vahidoo" class="crayons-story__secondary fw-medium m:hidden"&gt;
              Vahid A.Nezhad
            &lt;/a&gt;
            &lt;div class="profile-preview-card relative mb-4 s:mb-0 fw-medium hidden m:inline-block"&gt;
              
                Vahid A.Nezhad
                
              
              &lt;div id="story-author-preview-content-2755710" class="profile-preview-card__content crayons-dropdown branded-7 p-4 pt-0"&gt;
                &lt;div class="gap-4 grid"&gt;
                  &lt;div class="-mt-4"&gt;
                    &lt;a href="/vahidoo" class="flex"&gt;
                      &lt;span class="crayons-avatar crayons-avatar--xl mr-2 shrink-0"&gt;
                        &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F3414601%2F0dae2c8f-fa18-4507-90fe-5bd2e1b20ab5.jpg" class="crayons-avatar__image" alt=""&gt;
                      &lt;/span&gt;
                      &lt;span class="crayons-link crayons-subtitle-2 mt-5"&gt;Vahid A.Nezhad&lt;/span&gt;
                    &lt;/a&gt;
                  &lt;/div&gt;
                  &lt;div class="print-hidden"&gt;
                    
                      Follow
                    
                  &lt;/div&gt;
                  &lt;div class="author-preview-metadata-container"&gt;&lt;/div&gt;
                &lt;/div&gt;
              &lt;/div&gt;
            &lt;/div&gt;

          &lt;/div&gt;
          &lt;a href="https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o" class="crayons-story__tertiary fs-xs"&gt;&lt;time&gt;Aug 6 '25&lt;/time&gt;&lt;span class="time-ago-indicator-initial-placeholder"&gt;&lt;/span&gt;&lt;/a&gt;
        &lt;/div&gt;
      &lt;/div&gt;

    &lt;/div&gt;

    &lt;div class="crayons-story__indention"&gt;
      &lt;h2 class="crayons-story__title crayons-story__title-full_post"&gt;
        &lt;a href="https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o" id="article-link-2755710"&gt;
          🔁 LLMs Get Worse at Debugging Their Own Code
        &lt;/a&gt;
      &lt;/h2&gt;
        &lt;div class="crayons-story__tags"&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/llm"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;llm&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/coding"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;coding&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/ai"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;ai&lt;/a&gt;
        &lt;/div&gt;
      &lt;div class="crayons-story__bottom"&gt;
        &lt;div class="crayons-story__details"&gt;
          &lt;a href="https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left"&gt;
            &lt;div class="multiple_reactions_aggregate"&gt;
              &lt;span class="multiple_reactions_icons_container"&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/sparkle-heart-5f9bee3767e18deb1bb725290cb151c25234768a0e9a2bd39370c382d02920cf.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
              &lt;/span&gt;
              &lt;span class="aggregate_reactions_counter"&gt;1&lt;span class="hidden s:inline"&gt; reaction&lt;/span&gt;&lt;/span&gt;
            &lt;/div&gt;
          &lt;/a&gt;
            &lt;a href="https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o#comments" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left flex items-center"&gt;
              Comments


              3&lt;span class="hidden s:inline"&gt; comments&lt;/span&gt;
            &lt;/a&gt;
        &lt;/div&gt;
        &lt;div class="crayons-story__save"&gt;
          &lt;small class="crayons-story__tertiary fs-xs mr-2"&gt;
            2 min read
          &lt;/small&gt;
            
              &lt;span class="bm-initial"&gt;
                

              &lt;/span&gt;
              &lt;span class="bm-success"&gt;
                

              &lt;/span&gt;
            
        &lt;/div&gt;
      &lt;/div&gt;
    &lt;/div&gt;
  &lt;/div&gt;
&lt;/div&gt;

&lt;/div&gt;


</description>
      <category>llm</category>
      <category>coding</category>
      <category>ai</category>
    </item>
    <item>
      <title>🔁 LLMs Get Worse at Debugging Their Own Code</title>
      <dc:creator>Vahid A.Nezhad</dc:creator>
      <pubDate>Wed, 06 Aug 2025 12:24:31 +0000</pubDate>
      <link>https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o</link>
      <guid>https://dev.to/vahidoo/llms-get-worse-at-debugging-their-own-code-5h7o</guid>
      <description>&lt;p&gt;If you’re building tools that use large language models to write or fix code, there’s something you should know:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;LLMs lose most of their debugging ability after just 1 or 2 attempts.&lt;/p&gt;
&lt;/blockquote&gt;

&lt;p&gt;This isn’t intuition — it’s measurable.&lt;/p&gt;

&lt;p&gt;A new paper from Oxford and McGill introduces something called the Debugging Decay Index.&lt;br&gt;
It tracks how LLMs perform as they try to fix the same code, over and over.&lt;/p&gt;

&lt;p&gt;What it shows is simple, but important:&lt;/p&gt;

&lt;h2&gt;
  
  
  📉 LLMs Plateau After 2 Fixes
&lt;/h2&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F2vkhxf040f9qcs1rfrlt.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F2vkhxf040f9qcs1rfrlt.png" alt=" " width="800" height="544"&gt;&lt;/a&gt;&lt;br&gt;
The first fix attempt usually helps.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;The second one adds marginal value.&lt;/li&gt;
&lt;li&gt;By the third, things start breaking down — repeated edits, semantic drift, hallucinations.&lt;/li&gt;
&lt;li&gt;Performance drops up to 80% by the 5th round.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;So if your tool sends the same broken code back to the LLM again and again hoping it’ll “get it right” eventually — it probably won’t.&lt;/p&gt;

&lt;h2&gt;
  
  
  🧠 Why It Happens
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;LLMs don’t really “understand” what went wrong, they often repeat patterns they think look like a fix.&lt;/li&gt;
&lt;li&gt;They get overconfident, even when wrong.&lt;/li&gt;
&lt;li&gt;They lose track of the original intent.&lt;/li&gt;
&lt;li&gt;The more they edit their own output, the worse it gets.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;It’s like copying answers from your own wrong homework — again and again.&lt;/p&gt;

&lt;h2&gt;
  
  
  🤖 Why This Matters for LLM Builders
&lt;/h2&gt;

&lt;p&gt;If you're working on:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Autonomous coding agents&lt;/li&gt;
&lt;li&gt;LLM dev assistants&lt;/li&gt;
&lt;li&gt;Auto-debug pipelines&lt;/li&gt;
&lt;li&gt;Or anything where models fix their own mistakes…&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;This research is a warning.&lt;/p&gt;

&lt;p&gt;You need to limit how many times the model “self-fixes” before restarting, re-rolling, or using an external signal (like a test suite).&lt;/p&gt;

&lt;p&gt;More isn’t better.&lt;br&gt;
More is worse.&lt;/p&gt;

&lt;h2&gt;
  
  
  🔄 Rethink the Loop
&lt;/h2&gt;

&lt;p&gt;Instead of letting the model rewrite broken code over and over, try this:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;1–2 fix attempts max&lt;/li&gt;
&lt;li&gt;Then restart from the original prompt, or&lt;/li&gt;
&lt;li&gt;Try a different approach entirely&lt;/li&gt;
&lt;li&gt;Or use tests + heuristics to guide the fix&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Debugging with LLMs isn’t a straight line. It’s a decision tree — and going in circles doesn’t help.&lt;/p&gt;

&lt;p&gt;🧾 Source&lt;br&gt;
Paper: &lt;a href="https://arxiv.org/abs/2506.18403" rel="noopener noreferrer"&gt;The Debugging Decay Index&lt;/a&gt; (Oxford &amp;amp; McGill, 2025)&lt;/p&gt;

</description>
      <category>llm</category>
      <category>coding</category>
      <category>ai</category>
    </item>
    <item>
      <title>Python Prompts - Vibe Coding</title>
      <dc:creator>Vahid A.Nezhad</dc:creator>
      <pubDate>Tue, 05 Aug 2025 16:25:39 +0000</pubDate>
      <link>https://dev.to/vahidoo/python-prompts-vibe-coding-1j57</link>
      <guid>https://dev.to/vahidoo/python-prompts-vibe-coding-1j57</guid>
      <description>&lt;p&gt;In this short article, we’ll cover best practices for using coding LLMs to save time on everyday development tasks, so you can spend more effort building real value.&lt;/p&gt;

&lt;p&gt;Here are 4 key stages of a typical Python backend project lifecycle, from initialization to feature expansion, showing how LLMs can help at each step.&lt;/p&gt;

&lt;h2&gt;
  
  
  1. Project Initialization
&lt;/h2&gt;

&lt;p&gt;Start with a simple version, test thoroughly, and then scale your code step by step:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Set up a basic FastAPI app with `/healthcheck`, Pydantic validation, and Uvicorn entrypoint.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Generate a Django project with `users` and `projects` apps. Add SQLite DB and default URLs.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Write a `docker-compose.yml` for this Node.js + PostgreSQL + Redis stack.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Add CORS support to this FastAPI app, allowing requests from `localhost:3000`.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  2. Git (Commit, Push, PR Review)
&lt;/h2&gt;

&lt;p&gt;As long as LLMs understand your code changes, they can generate great commit messages.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Summarize changes in this file for a good commit message.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Write a commit message for this diff, following the Conventional Commits format.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Check this PR diff and point out risky changes or missing tests.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Fix merge conflict in this file. Keep changes from both branches where possible.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  3. Debugging
&lt;/h2&gt;



&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Explain this traceback and suggest a fix. Python 3.11 + SQLAlchemy.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Why is this Prisma migration failing? Here's the schema and error.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;This Django form isn't saving correctly — find the issue and fix it.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Logs show 500 on `/login`. Trace the root cause in the controller and model.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;h2&gt;
  
  
  4. Adding Features to Existing Project
&lt;/h2&gt;

&lt;p&gt;Adding new features can be tricky, so make sure the LLM reviews your code carefully. Keep the feature simple at first. Sometimes LLMs expect production-ready code and try to generate your feature at that level.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Add a `PUT /users/{id}` endpoint to this FastAPI router. Validate input and return updated user.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Create a background job in Django using Celery to send weekly emails.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Add file upload support to this Node.js Express route. Limit to 5MB images only.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Update this Prisma schema to support tags (many-to-many) for blog posts.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;





&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight plaintext"&gt;&lt;code&gt;Refactor this 200-line function into smaller reusable helpers. Keep tests passing.
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;This article was originally published on:&lt;br&gt;
&lt;a href="https://medium.com/@vahida9i/python-prompts-vibe-coding-642ece6ff8f9" rel="noopener noreferrer"&gt;https://medium.com/@vahida9i/python-prompts-vibe-coding-642ece6ff8f9&lt;/a&gt;&lt;/p&gt;

</description>
      <category>vibecoding</category>
      <category>python</category>
      <category>promptengineering</category>
      <category>ai</category>
    </item>
  </channel>
</rss>
