<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Solo</title>
    <description>The latest articles on DEV Community by Solo (@solo474).</description>
    <link>https://dev.to/solo474</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F293293%2F9e89f21b-d917-4bb4-ad0c-c71d9dc32084.jpeg</url>
      <title>DEV Community: Solo</title>
      <link>https://dev.to/solo474</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/solo474"/>
    <language>en</language>
    <item>
      <title>Difference Between useEffect and useLayoutEffect in React</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Mon, 08 Sep 2025 10:41:15 +0000</pubDate>
      <link>https://dev.to/solo474/difference-between-useeffect-and-uselayouteffect-in-react-2aeg</link>
      <guid>https://dev.to/solo474/difference-between-useeffect-and-uselayouteffect-in-react-2aeg</guid>
      <description>&lt;p&gt;The difference between &lt;code&gt;useEffect&lt;/code&gt; and &lt;code&gt;useLayoutEffect&lt;/code&gt; is usually described in terms of timing: one executes before the browser paints, and the other executes after. To verify this, I ran a controlled experiment and used Chrome DevTools to observe their execution on the performance timeline.&lt;/p&gt;

&lt;h2&gt;
  
  
  Experiment Setup
&lt;/h2&gt;

&lt;p&gt;The React component under test:&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight jsx"&gt;&lt;code&gt;&lt;span class="k"&gt;import&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt; &lt;span class="nx"&gt;useEffect&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nx"&gt;useLayoutEffect&lt;/span&gt; &lt;span class="p"&gt;}&lt;/span&gt; &lt;span class="k"&gt;from&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;react&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
&lt;span class="k"&gt;import&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;./App.css&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;

&lt;span class="kd"&gt;function&lt;/span&gt; &lt;span class="nf"&gt;App&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
  &lt;span class="nf"&gt;useEffect&lt;/span&gt;&lt;span class="p"&gt;(()&lt;/span&gt; &lt;span class="o"&gt;=&amp;gt;&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;mark&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;effect-start&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
    &lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="nx"&gt;waitUntil&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nb"&gt;Date&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;now&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="mi"&gt;1000&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
    &lt;span class="k"&gt;while &lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nb"&gt;Date&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;now&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&lt;/span&gt; &lt;span class="nx"&gt;waitUntil&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
      &lt;span class="c1"&gt;// Busy-wait&lt;/span&gt;
    &lt;span class="p"&gt;}&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;mark&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;effect-end&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;measure&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;effect-duration&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;effect-start&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;effect-end&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;
    &lt;span class="p"&gt;);&lt;/span&gt;
  &lt;span class="p"&gt;},&lt;/span&gt; &lt;span class="p"&gt;[]);&lt;/span&gt;

  &lt;span class="nf"&gt;useLayoutEffect&lt;/span&gt;&lt;span class="p"&gt;(()&lt;/span&gt; &lt;span class="o"&gt;=&amp;gt;&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;mark&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;layout-effect-start&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
    &lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="nx"&gt;waitUntil&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nb"&gt;Date&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;now&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="o"&gt;+&lt;/span&gt; &lt;span class="mi"&gt;1000&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
    &lt;span class="k"&gt;while &lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nb"&gt;Date&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;now&lt;/span&gt;&lt;span class="p"&gt;()&lt;/span&gt; &lt;span class="o"&gt;&amp;lt;&lt;/span&gt; &lt;span class="nx"&gt;waitUntil&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
      &lt;span class="c1"&gt;// Busy-wait&lt;/span&gt;
    &lt;span class="p"&gt;}&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;mark&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;layout-effect-end&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
    &lt;span class="nx"&gt;performance&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;measure&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;layout-effect-duration&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;layout-effect-start&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
      &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;layout-effect-end&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;
    &lt;span class="p"&gt;);&lt;/span&gt;
  &lt;span class="p"&gt;},&lt;/span&gt; &lt;span class="p"&gt;[]);&lt;/span&gt;

  &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="p"&gt;&amp;lt;&lt;/span&gt;&lt;span class="nt"&gt;div&lt;/span&gt; &lt;span class="na"&gt;className&lt;/span&gt;&lt;span class="p"&gt;=&lt;/span&gt;&lt;span class="s"&gt;"center-box"&lt;/span&gt;&lt;span class="p"&gt;&amp;gt;&lt;/span&gt;Hello&lt;span class="p"&gt;&amp;lt;/&lt;/span&gt;&lt;span class="nt"&gt;div&lt;/span&gt;&lt;span class="p"&gt;&amp;gt;;&lt;/span&gt;
&lt;span class="p"&gt;}&lt;/span&gt;

&lt;span class="k"&gt;export&lt;/span&gt; &lt;span class="k"&gt;default&lt;/span&gt; &lt;span class="nx"&gt;App&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Both hooks record performance marks so their exact execution time can be identified in the timeline.&lt;/p&gt;

&lt;h2&gt;
  
  
  Observation
&lt;/h2&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fk2mz0bi4j0y1q2ikj64j.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fk2mz0bi4j0y1q2ikj64j.png" alt=" " width="800" height="480"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;After recording the page load in Chrome’s Performance panel, the &lt;strong&gt;following sequence was visible&lt;/strong&gt;:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;The markers &lt;strong&gt;layout-effect-start&lt;/strong&gt; and &lt;strong&gt;layout-effect-end&lt;/strong&gt; appear on the timeline before the first paint event.&lt;/li&gt;
&lt;li&gt;The markers &lt;strong&gt;effect-start&lt;/strong&gt; and &lt;strong&gt;effect-end&lt;/strong&gt; appear after the first paint has completed.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;The flame chart makes this distinction clear&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;useLayoutEffect occurs synchronously, inside the rendering pipeline.&lt;/li&gt;
&lt;li&gt;useEffect is deferred until after rendering work concludes.&lt;/li&gt;
&lt;/ul&gt;

</description>
      <category>javascript</category>
      <category>react</category>
    </item>
    <item>
      <title>I think we need to start reading code more than we write it. As AI gets better at generating and completing code, we should make a practice of consciously reading code — because only if we truly and consciously understand it can we trust it.</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Sun, 07 Sep 2025 12:01:27 +0000</pubDate>
      <link>https://dev.to/solo474/i-think-we-need-to-start-reading-code-than-we-write-as-ai-getting-better-at-generating-and-3e34</link>
      <guid>https://dev.to/solo474/i-think-we-need-to-start-reading-code-than-we-write-as-ai-getting-better-at-generating-and-3e34</guid>
      <description></description>
    </item>
    <item>
      <title>The true cost of Fargate vs 10,000 managed CI minutes</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Fri, 29 Aug 2025 20:27:31 +0000</pubDate>
      <link>https://dev.to/solo474/the-true-cost-of-fargate-vs-10000-managed-ci-minutes-2fnf</link>
      <guid>https://dev.to/solo474/the-true-cost-of-fargate-vs-10000-managed-ci-minutes-2fnf</guid>
      <description>&lt;p&gt;I was thinking about running Playwright tests in AWS ECS Fargate. Running Playwright on ECS Fargate is fast and cheap. For 10,000 minutes at 1 vCPU/2 GB in EU-London, Fargate is about $9.46 (or $7.57 on Graviton, $2.84 on Spot) versus $40–$100 for managed CI minutes on well-known CI platforms like GitHub Actions, BitBucket, GitLab, AWS CodeBuild.&lt;/p&gt;

&lt;p&gt;But minutes aren’t the whole bill—watch egress/NAT, caches &amp;amp; storage, and CloudWatch costs, handle Spot preemptions with retries and fallback, and rebuild the "just works" UX of managed CI (ephemeral runners, OIDC, autoscaling, sane defaults).&lt;/p&gt;

&lt;h2&gt;
  
  
  A quick, concrete baseline
&lt;/h2&gt;

&lt;h3&gt;
  
  
  Assumptions
&lt;/h3&gt;

&lt;p&gt;Linux builds, EU (London) pricing, 1 vCPU / 2 GB RAM, 10,000 minutes (~166.7 hours).&lt;/p&gt;

&lt;div class="table-wrapper-paragraph"&gt;&lt;table&gt;
&lt;thead&gt;
&lt;tr&gt;
&lt;th&gt;Service&lt;/th&gt;
&lt;th&gt;What you pay for&lt;/th&gt;
&lt;th&gt;Ballpark cost for 10,000 min&lt;/th&gt;
&lt;/tr&gt;
&lt;/thead&gt;
&lt;tbody&gt;
&lt;tr&gt;
&lt;td&gt;&lt;strong&gt;ECS Fargate (1 vCPU, 2 GB RAM)&lt;/strong&gt;&lt;/td&gt;
&lt;td&gt;Raw compute only (you self-host the runner)&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$9.46&lt;/strong&gt; in &lt;strong&gt;EU (London)&lt;/strong&gt;, or &lt;strong&gt;$8.23&lt;/strong&gt; in &lt;strong&gt;US-East&lt;/strong&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;&lt;strong&gt;ECS Fargate (Graviton/ARM, 1 vCPU, 2 GB)&lt;/strong&gt;&lt;/td&gt;
&lt;td&gt;Raw compute only (you self-host the runner)&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$7.57&lt;/strong&gt; in &lt;strong&gt;EU (London)&lt;/strong&gt;, or &lt;strong&gt;$6.52&lt;/strong&gt; in &lt;strong&gt;US-East&lt;/strong&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;&lt;strong&gt;ECS Fargate Spot (Linux x86, 1 vCPU, 2 GB)&lt;/strong&gt;&lt;/td&gt;
&lt;td&gt;Interruptible compute (you self-host the runner)&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$2.84&lt;/strong&gt; in &lt;strong&gt;EU (London)&lt;/strong&gt;, or &lt;strong&gt;$2.47&lt;/strong&gt; in &lt;strong&gt;US-East&lt;/strong&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;AWS CodeBuild&lt;/strong&gt; (general1.small)&lt;/td&gt;
&lt;td&gt;Fully managed runner minutes&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$50.00&lt;/strong&gt; (&lt;strong&gt;$0.005/min&lt;/strong&gt;; first 100 min free)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;GitHub Actions&lt;/strong&gt; (Linux std)&lt;/td&gt;
&lt;td&gt;GitHub-hosted runner minutes&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$80.00&lt;/strong&gt; (&lt;strong&gt;$0.008/min&lt;/strong&gt;)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;GitLab SaaS&lt;/strong&gt; (shared runners)&lt;/td&gt;
&lt;td&gt;Shared runner minutes&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$100.00&lt;/strong&gt; (&lt;strong&gt;$10 per 1,000 minutes&lt;/strong&gt;)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;Bitbucket Pipelines&lt;/strong&gt; (extra minutes)&lt;/td&gt;
&lt;td&gt;Additional pipeline minutes&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$100.00&lt;/strong&gt; (&lt;strong&gt;$10 per 1,000 minutes&lt;/strong&gt;)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;CircleCI&lt;/strong&gt; (Linux)&lt;/td&gt;
&lt;td&gt;Credits → minutes (Performance plan)&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$60.00&lt;/strong&gt; on &lt;strong&gt;Linux VM Medium&lt;/strong&gt; (10 credits/min; credits $15 per 25,000 ⇒ ~$0.006/min). &lt;strong&gt;$30.00&lt;/strong&gt; on &lt;strong&gt;Docker Small&lt;/strong&gt; (5 credits/min)&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;
&lt;strong&gt;Azure Pipelines&lt;/strong&gt; (Microsoft-hosted)&lt;/td&gt;
&lt;td&gt;Parallel job, not per-minute&lt;/td&gt;
&lt;td&gt;
&lt;strong&gt;$40.00&lt;/strong&gt; for &lt;strong&gt;unlimited minutes&lt;/strong&gt; after 1,800 free — buy 1 extra hosted parallel job ($40/mo)&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;&lt;/div&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgpbebb922y72ado4s40r.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fgpbebb922y72ado4s40r.png" alt="Linux builds, EU (London) pricing, 1 vCPU / 2 GB RAM, 10,000 minutes (~166.7 hours)." width="800" height="460"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  But minutes aren’t the whole story: the true cost of 10,000 build minutes
&lt;/h2&gt;

&lt;h3&gt;
  
  
  Data transfer &amp;amp; NAT
&lt;/h3&gt;

&lt;p&gt;Pulling repos and packages, Docker layers, and cache uploads all drive data transfer and NAT costs. Use VPC endpoints for S3/ECR/CloudWatch and consider a package proxy (npm/pip/maven) to slash egress. You can also run it in a public subnet. &lt;/p&gt;

&lt;h3&gt;
  
  
  Caches &amp;amp; storage
&lt;/h3&gt;

&lt;p&gt;BuildKit cache to ECR, language caches to S3. Keep short retention—caches grow fast.&lt;/p&gt;

&lt;h3&gt;
  
  
  Logs &amp;amp; metrics
&lt;/h3&gt;

&lt;p&gt;CloudWatch ingestion + retention can quietly rival compute; cap retention to 7–14 days and sample.&lt;/p&gt;

&lt;h3&gt;
  
  
  Reliability
&lt;/h3&gt;

&lt;p&gt;Fargate Spot saves the most but is preemptible—add automatic retries, idempotent pipelines, and a fallback to on-demand if the queue ages.&lt;/p&gt;

&lt;h3&gt;
  
  
  Product velocity
&lt;/h3&gt;

&lt;p&gt;Managed CI buys you images, scaling, and fewer sharp edges. Your Fargate automation must recreate that "just works" feel (ephemeral runners, OIDC, autoscale, sane defaults) — so that it just works to run tests. &lt;/p&gt;

&lt;p&gt;Is $40 saved at 10,000 minutes justifiable—or just marginal?&lt;br&gt;
At 10k, it’s mostly marginal: ≈ $40 vs CodeBuild (≈ $70 vs GitHub).&lt;br&gt;
At 100k, it’s material: ≈ $405 (CodeBuild) / $705 (GitHub).&lt;br&gt;
At 1M, it’s compelling: ≈ $4,054 / $7,054.&lt;/p&gt;

&lt;p&gt;In my view, if you're building a Netlify or Vercel-style platform and can automate runner ops, ephemeral Fargate is dramatically cheaper per minute than most managed CI. The savings are real—but the non-minute costs and product trade-offs matter just as much.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;If your CI bill dropped 60–90% with Fargate Spot, what’s the one blocker left?&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;At 10k minutes/month, is $40 saved marginal—or the start of a bigger shift? Why?&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Would you trade occasional Spot retries for 5–10× cheaper minutes?&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;What’s your break-even minute count to justify self-hosted runners?&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;What do you think — can choosing Fargate Spot over managed CI be justified for your CI use case? Let me know in the comments.&lt;/p&gt;

</description>
      <category>aws</category>
      <category>fargate</category>
      <category>ecs</category>
      <category>ci</category>
    </item>
    <item>
      <title>Progressive Rollouts AKA Canary Releases with Azure Front Door</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Mon, 10 Mar 2025 10:18:54 +0000</pubDate>
      <link>https://dev.to/solo474/progressive-rollouts-aka-canary-releases-with-azure-front-door-2i8o</link>
      <guid>https://dev.to/solo474/progressive-rollouts-aka-canary-releases-with-azure-front-door-2i8o</guid>
      <description>&lt;p&gt;While we want to ship new features, enhancements, and fixes to users with continuous integration and continuous delivery, as the number of users of your application increases, progressive rollouts help minimise risks and ensure a smooth user experience when releasing updates.&lt;/p&gt;

&lt;p&gt;There are many ways to implement a progressive rollout, using Azure Front Door is one of the simplest and most suitable options for managing risks in small to medium applications. It provides an easy-to-configure strategy that balances stability with controlled feature releases.&lt;/p&gt;

&lt;p&gt;Using Azure Front Door for Progressive Rollouts&lt;br&gt;
Azure Front Door acts as a global load balancer and CDN, directing traffic across multiple backend pools based on routing rules. For progressive rollouts, Front Door offers a few mechanisms we can utilise based on our risk appetite. &lt;/p&gt;

&lt;h2&gt;
  
  
  Weighted Traffic Distribution
&lt;/h2&gt;

&lt;p&gt;Azure Front Door allows traffic splitting between different backend versions using weighted routing. Example: 90% of traffic remains on stable version while 10% is routed to next-release version.&lt;/p&gt;

&lt;h2&gt;
  
  
  Geographic Rollouts
&lt;/h2&gt;

&lt;p&gt;Deploy new features gradually to specific regions. Example: Start in Europe, then expand to North America.&lt;/p&gt;

&lt;h2&gt;
  
  
  Custom Rules with Azure Front Door Rules Engine
&lt;/h2&gt;

&lt;p&gt;Create rules based on HTTP headers, cookies, or query parameters to control rollout logic. Example: Users with a "beta-user" cookie get routed to the new version.&lt;/p&gt;

&lt;h2&gt;
  
  
  Power Users vs. New Users Rollouts
&lt;/h2&gt;

&lt;p&gt;Segment users based on experience level to control feature exposure. Example: Experienced power users in specific regions receive the new version first, while new users continue using the stable version to ensure a smooth onboarding experience.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;1. Origin Group&lt;/strong&gt;&lt;br&gt;
Azure Front Door requires defining an origin group, which contains multiple origins (backend endpoints). In this setup:&lt;/p&gt;

&lt;p&gt;The Stable Origin represents the existing production version, initially receiving most traffic.&lt;br&gt;
The Next-Release Origin represents the new version, receiving a small portion of traffic that can be gradually increased.&lt;/p&gt;

&lt;p&gt;Each origin within the group is assigned a weight, dictating the proportion of incoming requests it should handle. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;2. Configuring Weighted Traffic Distribution&lt;/strong&gt;&lt;br&gt;
Azure Front Door routes incoming requests to backends based on the configured weight values in the origin group settings. The load balancer evaluates these weights for each request and distributes traffic accordingly.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Weight-Based Load Balancing&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;If 90% weight is assigned to the stable backend and 10% to the new release, then out of 100 requests, roughly 90 will go to the stable backend and 10 to the new version.&lt;/p&gt;

&lt;p&gt;These percentages can be modified as the rollout progresses.&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Each origin within the group is assigned a weight.&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;Azure Front Door Weight-Based Load Balancing&lt;br&gt;
Priority Handling&lt;/p&gt;

&lt;p&gt;Both origins in the group should have the same priority level to ensure they are load-balanced based on weight rather than failover behavior. If different priority levels are assigned, Azure Front Door will send all traffic to the higher-priority backend unless it becomes unhealthy.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;3. Ensuring Session Consistency with Session Affinity&lt;/strong&gt;&lt;br&gt;
With this approach, enabling session affinity on Front Door is critical in a progressive rollout to provide users a consistent experience. If session affinity is not enabled, users might experience switching between versions mid-session (leading to inconsistent behavior) and authentication issues if session tokens are not shared across instances.&lt;/p&gt;

&lt;p&gt;Azure Front Door uses cookie-based session affinity, leveraging managed cookies (ASLBSA and ASLBSACORS) that encode the origin URL. This mechanism differentiates users, even if they share the same IP address. Read more about Azure Front Door Session Affinity&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Ensuring Session Consistency with Session Affinity&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;&lt;em&gt;Progressive Rollouts with Azure Front Door – Q&amp;amp;A&lt;/em&gt;&lt;br&gt;
Q: Will two users on the same public IP get different versions of the application?&lt;br&gt;
A: Yes, because Azure Front Door does not use IP-based session persistence. Instead, it relies on cookies to maintain session affinity. Each user's browser session is assigned a unique session affinity cookie (FrontDoorAffinity), which determines which backend origin they will continue interacting with.&lt;/p&gt;

&lt;p&gt;If session affinity is disabled or not functioning (e.g., due to caching issues), each new request is treated independently, and Azure Front Door routes traffic based on the weighted traffic distribution, which means different users even on the same IP may receive different versions of the application.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Azure Front Door does not use IP-based session persistence&lt;/strong&gt;&lt;br&gt;
Q: Why am I getting a different version of the application every time I refresh the page?&lt;br&gt;
A: One of the following issues might be occurring:&lt;/p&gt;

&lt;p&gt;Session Affinity is Disabled - Without the FrontDoorAffinity cookie, every request is independently routed, causing the backend selection to vary based on traffic weight distribution. Also check if the cookies are disabled in the browser. Read more about Azure Front Door Session Affinity&lt;br&gt;
Cached Responses Prevent Cookie Assignment – If the response from the backend is cacheable (e.g., Cache-Control: public), Azure Front Door cannot attach the session affinity cookie, resulting in random backend selection. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Solution&lt;/strong&gt;&lt;br&gt;
Ensure that session affinity is enabled at the origin group level.&lt;br&gt;
Check that the browser allows cookies and does not clear them between requests. Use "Cache-Control: no-store" in backend responses to prevent caching conflicts with session affinity.&lt;/p&gt;

</description>
      <category>cicd</category>
      <category>azure</category>
      <category>frontdoor</category>
      <category>scrum</category>
    </item>
    <item>
      <title>Are We Asking the Wrong Question About AI Replacing Programmers?</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Mon, 10 Mar 2025 10:17:06 +0000</pubDate>
      <link>https://dev.to/solo474/are-we-asking-the-wrong-question-about-ai-replacing-programmers-1ik4</link>
      <guid>https://dev.to/solo474/are-we-asking-the-wrong-question-about-ai-replacing-programmers-1ik4</guid>
      <description>&lt;p&gt;Will AI replace programmers? The real question is: will humans ever stop seeking more precise expression?&lt;/p&gt;

&lt;p&gt;I believe we humans always strive to express ideas in a more precise form. This requires a deeper contextual understanding of the system. In my opinion, as computers advance, our ability to describe and manipulate systems grows deeper. &lt;/p&gt;

&lt;h2&gt;
  
  
  Role of Programming Languages in Methodological Expression
&lt;/h2&gt;

&lt;p&gt;In my view, the book SICP: Structure and Interpretation of Computer Programs has proven that AI cannot replace programmers, even 40 years ago. This is, of course, a personal viewpoint, open to be proven wrong. &lt;/p&gt;

&lt;p&gt;"First, we want to establish the idea that a computer language is not just a way of getting a computer to perform operations but rather that it is a novel formal medium for expressing ideas about methodology." — SICP&lt;/p&gt;

&lt;p&gt;The Evolution of Computing and the Endurance of Technical Depth&lt;br&gt;
The history of computing supports this notion. Abstraction layers have not rendered deeper technical understanding obsolete; they have, instead, co-evolved with fundamental advancements in computing.&lt;/p&gt;

&lt;h2&gt;
  
  
  Coexistence of High-Level and Low-Level Programming
&lt;/h2&gt;

&lt;p&gt;Virtual machine (VM)-based programming languages like Java, C#, Python, and JavaScript have not replaced the need for system-level programming—instead, they coexist with low-level languages like C, C++, and Rust, which remain critical not only for performance but also for operating system kernels, system security, and hardware interaction.&lt;/p&gt;

&lt;h2&gt;
  
  
  Co-evolution of Programming Languages and Hardware Architectures.
&lt;/h2&gt;

&lt;p&gt;As C and C++ pushed for more control and efficiency, ISAs evolved to unlock new possibilities, driving performance and scalability to levels once thought impossible. And as architectures like RISC streamlined instructions and CISC embraced complexity, compilers evolved to squeeze out every ounce of efficiency. &lt;/p&gt;

&lt;p&gt;This dynamic is a feedback loop of innovation where software and hardware continuously push each other forward. In the end, it's this synergy that fuels the exponential growth of modern computing, reminding us that real breakthroughs happen when disciplines collide and evolve together.&lt;/p&gt;

&lt;p&gt;The need to express precise low-level computation instructions ensures that ISAs—or something better—will keep evolving. As hardware demands shift with advancements in AI, quantum computing, and specialized accelerators, rigid instruction sets won’t be enough. The focus will move toward more flexible, efficient, and domain-specific ways to control hardware. &lt;/p&gt;

&lt;p&gt;RISC-V is breaking away from traditional architectures like CISC due to the ongoing tension between complexity, efficiency, and the demand for flexibility in modern computing. CISC architectures (like x86) were designed to handle complex instructions within a single cycle, but this led to increased hardware complexity and less adaptability in a rapidly evolving tech landscape.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F7ilj3f4tah6z8rn2ne09.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F7ilj3f4tah6z8rn2ne09.png" alt=" " width="640" height="384"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  The Continued Importance of Foundational Knowledge in Circuit Design.
&lt;/h2&gt;

&lt;p&gt;While VHDL and Verilog have streamlined digital circuit design, a deep understanding of digital logic remains essential. Engineers with strong fundamentals can debug efficiently, optimise for performance, and minimise resource usage in FPGA and ASIC designs. Knowledge of Boolean algebra, Karnaugh maps, and FSMs ensures better synthesis and hardware efficiency. Foundational skills also help in adapting to new technologies, bridging theory with real-world implementation. Despite advancements in HDL, digital logic remains the backbone of efficient and reliable circuit design.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwjachqvp7b22m72lpdoe.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fwjachqvp7b22m72lpdoe.png" alt=" " width="526" height="559"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxg0d7gnsjg6cwjnb099n.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Fxg0d7gnsjg6cwjnb099n.jpg" alt="Image source: https://commons.wikimedia.org/wiki/File:NV_0501_Byers_Social4.jpg" width="800" height="420"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Semiconductor Fundamentals Still Matter in Modern Digital Circuit Design.
&lt;/h2&gt;

&lt;p&gt;While digital logic abstractions like logic gates and finite state machines have simplified circuit design, they fundamentally rely on the behavior of underlying semiconductor devices. A deep understanding of transistor-level physics remains essential for optimizing performance, power efficiency, and reliability. Technologies like CMOS, which dominate modern circuits, require knowledge of charge dynamics, leakage currents, and material properties to manage power consumption and thermal performance effectively. Advanced architectures such as FinFETs and GAAFETs, along with considerations of signal integrity and noise immunity, stem directly from semiconductor innovations rather than logical abstractions. Designers must also account for process variations and fabrication constraints that influence circuit behavior. As devices scale to the nanoscale, quantum effects further impact the behavior of logic gates, making semiconductor knowledge critical. Ultimately, mastering these fundamentals is vital for pushing the boundaries of digital design and achieving greater efficiency and performance.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9fc0ramqky7kmvxwfajo.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2F9fc0ramqky7kmvxwfajo.png" alt=" " width="640" height="324"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Progressing Beyond Classical Electronics
&lt;/h2&gt;

&lt;p&gt;While classical electronics and semiconductor physics form the backbone of modern technology, they haven’t limited innovation—they’ve propelled it. Engineers and scientists continue to push beyond traditional boundaries, diving into quantum mechanics to explore new computational possibilities and improve material properties. Advances in fabrication processes, such as EUV lithography and 3D chip stacking, have enabled more efficient, powerful, and compact devices. Meanwhile, quantum principles are increasingly influencing chip design, paving the way for quantum computing, spintronics, and neuromorphic architectures. By integrating classical principles with emerging technologies, the evolution of electronics continues to accelerate, redefining what’s possible in modern computing.&lt;/p&gt;

&lt;p&gt;"The computer revolution is a revolution in the way we think and in the way we express what we think. The essence of this change is the emergence of what might best be called procedural epistemology—the study of the structure of knowledge from an imperative point of view, as opposed to the more declarative point of view taken by classical mathematical subjects." — SICP&lt;/p&gt;

&lt;p&gt;Programming Languages as Tools for Procedural Epistemology&lt;br&gt;
A programming language is a formal system for expressing procedural epistemology, providing a precise and structured way to define, manipulate, and execute knowledge. Unlike natural languages, it is rigorous and unambiguous, enabling clear representation of computational processes.&lt;/p&gt;

</description>
    </item>
    <item>
      <title>Unit Test Suite Scrutiny</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Thu, 21 Oct 2021 16:26:16 +0000</pubDate>
      <link>https://dev.to/solo474/unit-test-suit-scrutiny-52d9</link>
      <guid>https://dev.to/solo474/unit-test-suit-scrutiny-52d9</guid>
      <description>&lt;p&gt;Scrutinizing your unit test suite against the points below may help keep it meaningful and well maintained.&lt;/p&gt;

&lt;h2&gt;
  
  
  Developer experience
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;How easy is it for a developer to use the test suite on a busy day?&lt;/li&gt;
&lt;li&gt;Is your CI good enough to execute your tests frequently enough?&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Test failures
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Break a code unit and find which tests are failing. It should expose weak points.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Test case descriptions and error messages
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;How accurate are the error messages on a test failure? Are you able to find out what exactly is broken just by looking at the message?&lt;/li&gt;
&lt;li&gt;How easy is it for you to understand what the code unit is responsible for just by looking at the log of the test suite execution?&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Test suite performance
&lt;/h2&gt;

&lt;ul&gt;
&lt;li&gt;Are there any tests taking an unreasonable or significant amount of time?&lt;/li&gt;
&lt;li&gt;Consider skipping such tests and finding the root cause of that test's low performance. This should help you discover the unknowns on which the implementation is relying. &lt;/li&gt;
&lt;/ul&gt;

</description>
    </item>
    <item>
      <title>Not Just Clean Code</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Sat, 19 Dec 2020 12:53:15 +0000</pubDate>
      <link>https://dev.to/solo474/not-just-clean-code-5d96</link>
      <guid>https://dev.to/solo474/not-just-clean-code-5d96</guid>
      <description>&lt;p&gt;The quote "Programs must be written for people to read, and only incidentally for machines to execute" from the book SICP has — or at least I believe it has — a deeper meaning than just the readability of the code. &lt;/p&gt;

&lt;p&gt;If we read the actual phrases from SICP closely, they were talking about creating abstractions that hide the details of the complexity of machine instructions, which allows the actual code to look closer to the problem domain. &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;&lt;em&gt;First, we want to establish the idea that a computer language is not just a way of getting a computer to perform operations but rather that it is a novel formal medium for expressing ideas&lt;br&gt;
about methodology&lt;/em&gt;&lt;/strong&gt;. Thus, programs must be written for people to read, and only incidentally for machines to execute.&lt;/p&gt;

&lt;p&gt;A domain-specific language is something that can be understood and used as a medium of precise communication between people working in that domain. It is a higher-level abstraction that can be easily understood without thinking about how the computer manages memory and schedules CPU time to execute the instructions of multiple programs in an operating system.&lt;/p&gt;

</description>
      <category>codequality</category>
      <category>programming</category>
      <category>architecture</category>
      <category>code</category>
    </item>
    <item>
      <title>Flush those first bytes</title>
      <dc:creator>Solo</dc:creator>
      <pubDate>Thu, 17 Sep 2020 08:40:23 +0000</pubDate>
      <link>https://dev.to/solo474/web-page-speed-74h</link>
      <guid>https://dev.to/solo474/web-page-speed-74h</guid>
      <description>&lt;h1&gt;
  
  
  series: strategic web performance
&lt;/h1&gt;

&lt;p&gt;This is my first post in the strategic web performance series, where I publish my analysis and some strategies based on my understanding, which may help others understand and develop web applications while keeping performance in mind.&lt;/p&gt;

&lt;p&gt;This post focuses on explaining the need for flushing the initial and critical HTML content to improve server response times.&lt;/p&gt;

&lt;p&gt;Let us start by understanding the journey of the page response from the server through the telecommunications network; then we can think about strategies we can apply to design applications with improved performance. &lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2F29fdu5kjk59fgm5cayps.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2F29fdu5kjk59fgm5cayps.jpg" alt="Alt Text" width="650" height="103"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;The browser sends a request to the server and waits for the server to respond.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;If the server responds with the HTML document only after constructing the complete document -- this can take several milliseconds or even more, especially when constructing some parts of it requires heavy computation or depends on other systems.&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fqshfev73bfin0w1n7ig3.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fqshfev73bfin0w1n7ig3.jpg" alt="Alt Text" width="745" height="137"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;h2&gt;
  
  
  Overlap
&lt;/h2&gt;

&lt;p&gt;We will need to start flushing the meaningful HTML content to the stream as soon as it is available.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fk4al09phu8slalm5zvg1.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fk4al09phu8slalm5zvg1.jpg" alt="Alt Text" width="745" height="127"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Then we will get an overlap of server time and network time, as while the server is busy constructing the remaining part of the page, the response is already on the wire.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fe2xlxjavxpwh4qcady8h.jpg" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fi%2Fe2xlxjavxpwh4qcady8h.jpg" alt="Alt Text" width="457" height="91"&gt;&lt;/a&gt;&lt;/p&gt;

</description>
      <category>browser</category>
      <category>performance</category>
      <category>speed</category>
      <category>webpack</category>
    </item>
  </channel>
</rss>
