<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: Atul Jalan</title>
    <description>The latest articles on DEV Community by Atul Jalan (@atuljalan).</description>
    <link>https://dev.to/atuljalan</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F2564057%2Fdca30c6b-a7c7-4f31-a19c-bb9a9b416912.png</url>
      <title>DEV Community: Atul Jalan</title>
      <link>https://dev.to/atuljalan</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/atuljalan"/>
    <language>en</language>
    <item>
      <title>After 100s of hours of trial-and-error, I decided to write up a field guide to finding success with AI coding:</title>
      <dc:creator>Atul Jalan</dc:creator>
      <pubDate>Wed, 28 May 2025 18:15:28 +0000</pubDate>
      <link>https://dev.to/atuljalan/after-100s-of-hours-of-trial-and-error-i-decided-to-write-up-a-field-guide-to-finding-success-with-5h0m</link>
      <guid>https://dev.to/atuljalan/after-100s-of-hours-of-trial-and-error-i-decided-to-write-up-a-field-guide-to-finding-success-with-5h0m</guid>
      <description>&lt;div class="ltag__link--embedded"&gt;
  &lt;div class="crayons-story "&gt;
  &lt;a href="https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8" class="crayons-story__hidden-navigation-link"&gt;A Practical Field Guide to AI Coding&lt;/a&gt;


  &lt;div class="crayons-story__body crayons-story__body-full_post"&gt;
    &lt;div class="crayons-story__top"&gt;
      &lt;div class="crayons-story__meta"&gt;
        &lt;div class="crayons-story__author-pic"&gt;

          &lt;a href="/atuljalan" class="crayons-avatar  crayons-avatar--l  "&gt;
            &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F2564057%2Fdca30c6b-a7c7-4f31-a19c-bb9a9b416912.png" alt="atuljalan profile" class="crayons-avatar__image"&gt;
          &lt;/a&gt;
        &lt;/div&gt;
        &lt;div&gt;
          &lt;div&gt;
            &lt;a href="/atuljalan" class="crayons-story__secondary fw-medium m:hidden"&gt;
              Atul Jalan
            &lt;/a&gt;
            &lt;div class="profile-preview-card relative mb-4 s:mb-0 fw-medium hidden m:inline-block"&gt;
              
                Atul Jalan
                
              
              &lt;div id="story-author-preview-content-2538237" class="profile-preview-card__content crayons-dropdown branded-7 p-4 pt-0"&gt;
                &lt;div class="gap-4 grid"&gt;
                  &lt;div class="-mt-4"&gt;
                    &lt;a href="/atuljalan" class="flex"&gt;
                      &lt;span class="crayons-avatar crayons-avatar--xl mr-2 shrink-0"&gt;
                        &lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F2564057%2Fdca30c6b-a7c7-4f31-a19c-bb9a9b416912.png" class="crayons-avatar__image" alt=""&gt;
                      &lt;/span&gt;
                      &lt;span class="crayons-link crayons-subtitle-2 mt-5"&gt;Atul Jalan&lt;/span&gt;
                    &lt;/a&gt;
                  &lt;/div&gt;
                  &lt;div class="print-hidden"&gt;
                    
                      Follow
                    
                  &lt;/div&gt;
                  &lt;div class="author-preview-metadata-container"&gt;&lt;/div&gt;
                &lt;/div&gt;
              &lt;/div&gt;
            &lt;/div&gt;

          &lt;/div&gt;
          &lt;a href="https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8" class="crayons-story__tertiary fs-xs"&gt;&lt;time&gt;May 28 '25&lt;/time&gt;&lt;span class="time-ago-indicator-initial-placeholder"&gt;&lt;/span&gt;&lt;/a&gt;
        &lt;/div&gt;
      &lt;/div&gt;

    &lt;/div&gt;

    &lt;div class="crayons-story__indention"&gt;
      &lt;h2 class="crayons-story__title crayons-story__title-full_post"&gt;
        &lt;a href="https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8" id="article-link-2538237"&gt;
          A Practical Field Guide to AI Coding
        &lt;/a&gt;
      &lt;/h2&gt;
        &lt;div class="crayons-story__tags"&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/ai"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;ai&lt;/a&gt;
            &lt;a class="crayons-tag  crayons-tag--monochrome " href="/t/programming"&gt;&lt;span class="crayons-tag__prefix"&gt;#&lt;/span&gt;programming&lt;/a&gt;
        &lt;/div&gt;
      &lt;div class="crayons-story__bottom"&gt;
        &lt;div class="crayons-story__details"&gt;
          &lt;a href="https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left"&gt;
            &lt;div class="multiple_reactions_aggregate"&gt;
              &lt;span class="multiple_reactions_icons_container"&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/raised-hands-74b2099fd66a39f2d7eed9305ee0f4553df0eb7b4f11b01b6b1b499973048fe5.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/multi-unicorn-b44d6f8c23cdd00964192bedc38af3e82463978aa611b4365bd33a0f1f4f3e97.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
                  &lt;span class="crayons_icon_container"&gt;
                    &lt;img src="https://assets.dev.to/assets/sparkle-heart-5f9bee3767e18deb1bb725290cb151c25234768a0e9a2bd39370c382d02920cf.svg" width="18" height="18"&gt;
                  &lt;/span&gt;
              &lt;/span&gt;
              &lt;span class="aggregate_reactions_counter"&gt;5&lt;span class="hidden s:inline"&gt; reactions&lt;/span&gt;&lt;/span&gt;
            &lt;/div&gt;
          &lt;/a&gt;
            &lt;a href="https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8#comments" class="crayons-btn crayons-btn--s crayons-btn--ghost crayons-btn--icon-left flex items-center"&gt;
              Comments


              &lt;span class="hidden s:inline"&gt;Add Comment&lt;/span&gt;
            &lt;/a&gt;
        &lt;/div&gt;
        &lt;div class="crayons-story__save"&gt;
          &lt;small class="crayons-story__tertiary fs-xs mr-2"&gt;
            5 min read
          &lt;/small&gt;
            
              &lt;span class="bm-initial"&gt;
                

              &lt;/span&gt;
              &lt;span class="bm-success"&gt;
                

              &lt;/span&gt;
            
        &lt;/div&gt;
      &lt;/div&gt;
    &lt;/div&gt;
  &lt;/div&gt;
&lt;/div&gt;

&lt;/div&gt;


</description>
      <category>ai</category>
      <category>programming</category>
    </item>
    <item>
      <title>A Practical Field Guide to AI Coding</title>
      <dc:creator>Atul Jalan</dc:creator>
      <pubDate>Wed, 28 May 2025 18:11:27 +0000</pubDate>
      <link>https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8</link>
      <guid>https://dev.to/atuljalan/a-practical-field-guide-to-ai-coding-2an8</guid>
      <description>&lt;p&gt;I, like most other developers, experienced AI coding in three stages: from initial awe, to deep disillusionment, to finally, a balanced view. LLMs are neither super-intelligence nor vaporware. Annoyingly, they're somewhere in the middle. Truly magical for some tasks, utter garbage for others. Your job as a developer is to build intuition for which is which.&lt;/p&gt;

&lt;p&gt;This essay is a field guide to developing that intuition. After hundreds of hits and misses, I've decided to summarize in rank-order the tasks where AI has consistently succeeded, and where it's tended to fall apart.&lt;/p&gt;

&lt;p&gt;Though no one can skip the three stages, hopefully this will speed you along.&lt;/p&gt;

&lt;h2&gt;
  
  
  Green-field tasks (Excellent)
&lt;/h2&gt;

&lt;p&gt;LLMs excel at writing scoped methods that are not dependent on context from your codebase, even when the complexity of the method itself may be high. These kinds of tasks enable models to fully leverage their vast knowledge base without having to consider how it applies to your context.&lt;/p&gt;

&lt;p&gt;For example:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Utilities like rate limiters, date formatting, array transformations, etc.&lt;/li&gt;
&lt;li&gt;Base UI components like inputs and selects&lt;/li&gt;
&lt;li&gt;Regex formulas&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;In these situations, use AI religiously to one-shot the entire thing.&lt;/p&gt;

&lt;p&gt;For the best outcome, write the function or class signature yourself and have the AI fill it in. This will maximize chances that the LLM will adhere to your vision of how the final product should work. For maximum speed, have the model generate just the signature, edit to your liking, then ask it to write the actual code.&lt;/p&gt;

&lt;h2&gt;
  
  
  Pattern Expansion (Great)
&lt;/h2&gt;

&lt;p&gt;LLMs are great at repeating existing patterns - especially once you've written the first few examples. This works surprisingly well even if the pattern is quite unique to your codebase, since LLMs are fundamentally trained to model and reproduce patterns in data.&lt;/p&gt;

&lt;p&gt;For example:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Add more variants to a component once the base exists (e.g. more button types)&lt;/li&gt;
&lt;li&gt;Generate additional endpoints or models following existing conventions&lt;/li&gt;
&lt;li&gt;Expand schema definitions in an OpenAPI or JSON Schema&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Probability of success with this task will depend on two factors:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;How formulaic is the task? Less nuance is better here. You want tasks where a clear set of patterns is repeated every time, instead of conditionally repeated based on other aspects of the data.&lt;/li&gt;
&lt;li&gt;How organized are the examples? If this is a pattern you will repeat a lot (e.g. API endpoints), it's worth the time to create 2-3 excellent examples that you can throw into context every time.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;
  
  
  Documentation (Great)
&lt;/h2&gt;

&lt;p&gt;LLMs are great at producing well-written documentation (e.g. docstrings) for your methods and classes. This task works well because it plays to a core strength of LLMs: compression. Just as they're trained to pack vast amounts of knowledge into a small set of weights, they can also compress logic from lots of code into a high-level explanation.&lt;/p&gt;

&lt;p&gt;LLMs are further aided by existing language conventions for how to write docstrings and comments, leaving them less room to freestyle and potentially mess up.&lt;/p&gt;

&lt;h2&gt;
  
  
  Code Explanation (Good)
&lt;/h2&gt;

&lt;p&gt;LLMs are reasonably effective at summarizing what blocks of code do, even when the code is quite long and spans multiple files. The summaries are rarely perfect, but they tend to be directionally correct, which is often enough to accelerate understanding.&lt;/p&gt;

&lt;p&gt;This task once again takes advantage of an LLM's strong compression capabilities.&lt;/p&gt;

&lt;p&gt;Don't waste time formatting the prompt or providing tons of additional context. The increase in response quality is often minimal.&lt;/p&gt;

&lt;p&gt;Obviously, never trust the explanation word-for-word. The summaries will almost always skip over sections of the source code and confidently misstate logic in a highly believable way. Use this capability as a jumping-off point before diving in, not as the final word.&lt;/p&gt;

&lt;h2&gt;
  
  
  Technical Spec Review (Good)
&lt;/h2&gt;

&lt;p&gt;Ask LLMs to review your technical planning docs and point out possible issues and edge cases you may not have thought of.&lt;/p&gt;

&lt;p&gt;These tasks enable LLMs to map your specific task to the generalized equivalent and pattern-match that against their vast training set. In contrast, asking LLMs to write the document itself will usually perform poorly since the LLM has to map the generalized task to your specific context.&lt;/p&gt;

&lt;p&gt;Even if the majority of its recommendations are irrelevant, there will usually be at least one or two points that are worth considering and addressing in the spec.&lt;/p&gt;

&lt;h2&gt;
  
  
  Debugging (Bad)
&lt;/h2&gt;

&lt;p&gt;LLMs are poor primary tools for debugging. They struggle in situations where the root cause is unclear and context is fragmented. The worst part is that they will confidently hallucinate incorrect solutions, burning your time and spiking your cortisol.&lt;/p&gt;

&lt;p&gt;Debugging is generally a process of exploration. You start with minimal knowledge and slowly gain understanding through iterative investigation. LLMs, on the other hand, perform best when given comprehensive context and asked to produce clear outputs.&lt;/p&gt;

&lt;p&gt;That said, LLMs can be helpful as an encyclopedia. During debugging, I use a parallel chat window for asking general questions about libraries, language behaviors, or common failure modes. These kinds of queries work well because they don't rely on the model having any contextual knowledge of your codebase.&lt;/p&gt;

&lt;h2&gt;
  
  
  Multi-file edits (Terrible)
&lt;/h2&gt;

&lt;p&gt;In my experience, success rates drop sharply as soon as the requested edit spans multiple files. I've tested this by comparing how an LLM performs when asked to make an edit across multiple files vs. those same files combined into one larger document. It always does significantly better in the latter case.&lt;/p&gt;

&lt;p&gt;I've found it's faster to decompose your task into multiple requests to edit single files. This simplifies the contextual burden to the LLM, and the cognitive load to you as the reviewer.&lt;/p&gt;

&lt;h2&gt;
  
  
  Conclusion
&lt;/h2&gt;

&lt;p&gt;For me, success with LLMs has had less to do with prompting and more to do with task selection. This has been in contrast to the conventional wisdom that LLM performance hinges almost entirely on the precision and depth of the prompt.&lt;/p&gt;

&lt;p&gt;No doubt, there are much more sophisticated workflows for using LLMs. My goal has been to optimize time investment to successful results, not to engage in an exhaustive search for the perfect setup. Besides, the landscape is evolving far too quickly to make such a quest worthwhile.&lt;/p&gt;

&lt;p&gt;While online discussion on AI coding skews to the extreme (either AI-first or AI-never), I think most developers are part of a silent majority with a more measured approach. To that group: publish your workflows! There's far too much noise from influencers tweeting about how they're commanding an army of AI agents to build 7 apps in a weekend, and not enough from developers at mature companies explaining how they're using AI coding on real projects in large codebases. Hopefully this guide helps tip the balance in that direction.&lt;/p&gt;




&lt;p&gt;This essay was originally published on &lt;a href="https://composehq.com/blog/ai-coding-field-guide-5-27-25" rel="noopener noreferrer"&gt;composehq.com&lt;/a&gt;.&lt;/p&gt;

</description>
      <category>ai</category>
      <category>programming</category>
    </item>
    <item>
      <title>Lessons from Scaling WebSockets</title>
      <dc:creator>Atul Jalan</dc:creator>
      <pubDate>Fri, 24 Jan 2025 19:20:27 +0000</pubDate>
      <link>https://dev.to/atuljalan/lessons-from-scaling-websockets-3ll4</link>
      <guid>https://dev.to/atuljalan/lessons-from-scaling-websockets-3ll4</guid>
      <description>&lt;p&gt;With the rising demand for sync engines and real-time feature, WebSockets have become a critical component for modern applications. At &lt;a href="https://composehq.com" rel="noopener noreferrer"&gt;Compose&lt;/a&gt;, WebSockets form the backbone of our service, powering our backend SDKs that enable developers to deliver low-latency interactive applications with just backend code.&lt;/p&gt;

&lt;p&gt;But, scaling WebSockets has proven to be far more complex than we expected. Below are some of the most important lessons we've learned along the way.&lt;/p&gt;

&lt;h2&gt;
  
  
  Handle deployments gracefully
&lt;/h2&gt;

&lt;p&gt;Users should never notice when deployments happen, so WebSocket connections need to persist across deployments. This is a delicate process, and requires robust reconnection logic to deal with unexpected issues. At Compose, we achieve near-zero downtime by following these steps:&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;p&gt;Spin up new servers.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Once the new servers are healthy, old servers begin returning &lt;code&gt;503 Service Unavailable&lt;/code&gt; responses to health checks.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;After 4 consecutive &lt;code&gt;503&lt;/code&gt; responses, the load balancer declares the server unhealthy and removes the old servers from the pool. The load balancer health checks every 5 seconds, so this process takes up to 25 seconds.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;Old servers send a custom WebSocket close message instructing clients to delay reconnection by a random interval to avoid a reconnection surge.&lt;/p&gt;&lt;/li&gt;
&lt;/ol&gt;

&lt;ul&gt;
&lt;li&gt;The custom close message lets clients show users a more accurate message during the ~10 second period where the client is disconnected.&lt;/li&gt;
&lt;li&gt;The random delay helps prevent thundering herd issues where all clients reconnect at once. Clients also double the exponential backoff for deployment-related reconnections to account for unforeseen issues.&lt;/li&gt;
&lt;li&gt;The close message is delayed by 20 seconds to account for the time it takes for the load balancer to shift traffic.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Once all clients disconnect, the old servers shut down completely.&lt;/p&gt;

&lt;p&gt;If you're using a managed service like Render or Railway, you should be especially cognizant that client connections are transferred gracefully during deployments.&lt;/p&gt;

&lt;p&gt;Many managed services that tout zero-downtime deployments will wait until all outstanding requests are processed before shutting down a server. Since WebSocket connections are persistent, this can lead to situations in which old servers are active for minutes or even hours after a deploy until the managed service forcibly terminates the process.&lt;/p&gt;

&lt;h2&gt;
  
  
  Establish a consistent message schema
&lt;/h2&gt;

&lt;p&gt;While HTTP comes with built-in routing conventions (&lt;code&gt;GET /user&lt;/code&gt;, &lt;code&gt;POST /company&lt;/code&gt;, &lt;code&gt;PUT /settings&lt;/code&gt;), WebSockets require developers to define their own schema for organizing messages.&lt;/p&gt;

&lt;p&gt;At Compose, every WebSocket message starts with a fixed 2-byte &lt;code&gt;type&lt;/code&gt; prefix for categorizing messages.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;It's space-efficient (only 2 bytes), while still scaling to 65,536 different types.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;It enables clients to reliably slice the &lt;code&gt;type&lt;/code&gt; prefix from the message without affecting the rest of the data, since the prefix is always 2 bytes.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;It gives us a simple method for upgrading our APIs by versioning message types.&lt;br&gt;
&lt;/p&gt;&lt;/li&gt;
&lt;/ul&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight typescript"&gt;&lt;code&gt;&lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="nx"&gt;MESSAGE_TYPE_TO_HEADER&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
  &lt;span class="na"&gt;RENDER_UI&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;aa&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
  &lt;span class="na"&gt;UPDATE_UI&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;ab&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
  &lt;span class="na"&gt;SHOW_LOADING&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;ac&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
  &lt;span class="na"&gt;RENDER_UI_V2&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;ad&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;
  &lt;span class="cm"&gt;/* ... */&lt;/span&gt;
&lt;span class="p"&gt;}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;Additionally, we use delimiters to separate different fields inside the message, which is both faster to encode/decode and more memory-efficient than JSON.&lt;br&gt;
&lt;/p&gt;

&lt;div class="highlight js-code-highlight"&gt;
&lt;pre class="highlight typescript"&gt;&lt;code&gt;&lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="nx"&gt;DELIMITER&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="s2"&gt;|&lt;/span&gt;&lt;span class="dl"&gt;"&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;

&lt;span class="kd"&gt;function&lt;/span&gt; &lt;span class="nf"&gt;createDelimitedMessage&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="kd"&gt;type&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="kr"&gt;string&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nx"&gt;args&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="kr"&gt;any&lt;/span&gt;&lt;span class="p"&gt;[])&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
  &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="nx"&gt;MESSAGE_TYPE_TO_HEADER&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="kd"&gt;type&lt;/span&gt;&lt;span class="p"&gt;],&lt;/span&gt; &lt;span class="p"&gt;...&lt;/span&gt;&lt;span class="nx"&gt;args&lt;/span&gt;&lt;span class="p"&gt;].&lt;/span&gt;&lt;span class="nf"&gt;join&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nx"&gt;DELIMITER&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
&lt;span class="p"&gt;}&lt;/span&gt;

&lt;span class="kd"&gt;function&lt;/span&gt; &lt;span class="nf"&gt;parseDelimitedMessage&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nx"&gt;message&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt; &lt;span class="kr"&gt;string&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt;
  &lt;span class="kd"&gt;const&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="kd"&gt;type&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="p"&gt;...&lt;/span&gt;&lt;span class="nx"&gt;args&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nx"&gt;message&lt;/span&gt;&lt;span class="p"&gt;.&lt;/span&gt;&lt;span class="nf"&gt;split&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="nx"&gt;DELIMITER&lt;/span&gt;&lt;span class="p"&gt;);&lt;/span&gt;
  &lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="p"&gt;{&lt;/span&gt; &lt;span class="kd"&gt;type&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="nx"&gt;args&lt;/span&gt; &lt;span class="p"&gt;};&lt;/span&gt;
&lt;span class="p"&gt;}&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;

&lt;/div&gt;



&lt;p&gt;We're lucky that our backend and frontend are written in TypeScript, allowing us to share message schemas between the two and ensure that neither falls out of sync.&lt;/p&gt;

&lt;h2&gt;
  
  
  Detect silent disconnects with heartbeats
&lt;/h2&gt;

&lt;p&gt;Connections can drop unexpectedly without triggering a &lt;a href="https://developer.mozilla.org/en-US/docs/Web/API/WebSocket/close_event" rel="noopener noreferrer"&gt;close&lt;/a&gt; event, leading to a situation in which the client thinks they're connected, but actually aren't. To prevent stale connections, implementing a robust heartbeat mechanism is essential.&lt;br&gt;
We send periodic &lt;a href="https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API/Writing_WebSocket_servers#pings_and_pongs_the_heartbeat_of_websockets" rel="noopener noreferrer"&gt;ping/pong messages&lt;/a&gt; between client and server and reconnect in cases where the heartbeat isn't received within some interval.&lt;/p&gt;

&lt;p&gt;Our server sends a &lt;code&gt;ping&lt;/code&gt; message every 30 seconds, and expects a &lt;code&gt;pong&lt;/code&gt; response. In cases where the client doesn't receive a &lt;code&gt;ping&lt;/code&gt; every 45 seconds, it immediately drops the connection and tries to reconnect. Similarly, the server closes connections that miss &lt;code&gt;pong&lt;/code&gt; responses within 45 seconds.&lt;/p&gt;

&lt;p&gt;By monitoring heartbeats on both ends, we detect and handle rare cases where the client side network appears functional but the server never receives responses.&lt;/p&gt;

&lt;h2&gt;
  
  
  Have an HTTP fallback
&lt;/h2&gt;

&lt;p&gt;WebSocket connections can be unexpectedly blocked, especially on restrictive public networks. To mitigate such issues, Compose uses &lt;a href="https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events" rel="noopener noreferrer"&gt;server-sent events (SSE)&lt;/a&gt; as a fallback for receiving updates, while HTTP requests handle client-to-server communication.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffeoorsv6sgk3839dz72z.png" class="article-body-image-wrapper"&gt;&lt;img src="https://media2.dev.to/dynamic/image/width=800%2Cheight=%2Cfit=scale-down%2Cgravity=auto%2Cformat=auto/https%3A%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Farticles%2Ffeoorsv6sgk3839dz72z.png" alt="SSE &amp;amp; HTTP Fallback" width="800" height="423"&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Since SSE is HTTP-based, it's much less likely to be blocked, providing a reliable alternative in restricted environments. Plus it still achieves decently low latency, especially compared to short-polling solutions.&lt;/p&gt;

&lt;h2&gt;
  
  
  Concluding thoughts
&lt;/h2&gt;

&lt;p&gt;There's a whole lot more to scaling WebSockets that we didn't cover here. For example:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;
&lt;strong&gt;Lack of standard tooling&lt;/strong&gt;: While most frameworks include built-in tools for rate limiting, data validation, and error handling, you'll generally have to implement these features on your own for WebSockets.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Inability to cache responses&lt;/strong&gt;: Edge networks make it easy to cache HTTP responses close to users, but there's no standard way to accomplish this with WebSockets.&lt;/li&gt;
&lt;li&gt;
&lt;strong&gt;Per-message authentication&lt;/strong&gt;: Guarding against abuse by ensuring that each message is valid for that user before processing it.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;But regardless of the complexity, users expect modern applications to be fast, realtime, and collaborative. And, as of now, there's no better way to achieve that than WebSockets.&lt;/p&gt;

&lt;p&gt;At Compose, WebSockets power the entire platform - from the database all the way to the main UI thread. Via our SDKs, developers can generate full web apps from their backend logic. Making sure those apps are fast and performant at scale requires WebSockets. If you're interested in learning more, &lt;a href="https://docs.composehq.com" rel="noopener noreferrer"&gt;check out our docs&lt;/a&gt;. It takes less than 5 minutes to install the SDK and build your first app.&lt;/p&gt;

</description>
      <category>webdev</category>
      <category>javascript</category>
      <category>programming</category>
      <category>node</category>
    </item>
    <item>
      <title>How we wrote a better debouncer for our UI rendering engine</title>
      <dc:creator>Atul Jalan</dc:creator>
      <pubDate>Tue, 21 Jan 2025 20:18:45 +0000</pubDate>
      <link>https://dev.to/atuljalan/how-we-wrote-a-better-debouncer-for-our-ui-rendering-engine-4ff5</link>
      <guid>https://dev.to/atuljalan/how-we-wrote-a-better-debouncer-for-our-ui-rendering-engine-4ff5</guid>
      <description>&lt;p&gt;&lt;a href="https://composehq.com/blog/optimize-debounce-1-17-25" rel="noopener noreferrer"&gt;https://composehq.com/blog/optimize-debounce-1-17-25&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;I've written a lot of debouncers in my life. Thought I'd share this article on how we wrote a smarter algorithm to debounce updates for our UI rendering engine.&lt;/p&gt;

</description>
      <category>javascript</category>
      <category>webdev</category>
      <category>performance</category>
      <category>ui</category>
    </item>
  </channel>
</rss>
