<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>DEV Community: aarnav preeth</title>
    <description>The latest articles on DEV Community by aarnav preeth (@aarnav_preeth_3f78c31815f).</description>
    <link>https://dev.to/aarnav_preeth_3f78c31815f</link>
    <image>
      <url>https://media2.dev.to/dynamic/image/width=90,height=90,fit=cover,gravity=auto,format=auto/https:%2F%2Fdev-to-uploads.s3.amazonaws.com%2Fuploads%2Fuser%2Fprofile_image%2F3763818%2F0438968b-f1c5-4f06-9ca0-d932c45b5ccd.png</url>
      <title>DEV Community: aarnav preeth</title>
      <link>https://dev.to/aarnav_preeth_3f78c31815f</link>
    </image>
    <atom:link rel="self" type="application/rss+xml" href="https://dev.to/feed/aarnav_preeth_3f78c31815f"/>
    <language>en</language>
    <item>
      <title>Pre-deployment evaluation for models that run continuously</title>
      <dc:creator>aarnav preeth</dc:creator>
      <pubDate>Tue, 10 Feb 2026 09:00:49 +0000</pubDate>
      <link>https://dev.to/aarnav_preeth_3f78c31815f/pre-deployment-evaluation-for-models-that-run-continuously-36ne</link>
      <guid>https://dev.to/aarnav_preeth_3f78c31815f/pre-deployment-evaluation-for-models-that-run-continuously-36ne</guid>
      <description>&lt;p&gt;When working with models that run continuously, I’ve found it hard to reason about how performance degrades over time using only static train/test evaluation. For those of you who deploy long-lived models: how do you currently build intuition about model behavior under distributional change before deployment, if at all? What kinds of tools or practices do you rely on?&lt;/p&gt;

</description>
      <category>datascience</category>
      <category>discuss</category>
      <category>machinelearning</category>
      <category>testing</category>
    </item>
  </channel>
</rss>
