-- Validation: confirm warehouse-level spillage counts match the per-query drill-down table
SELECT
w.WAREHOUSE_NAME,
w.SPILLAGE_QUERIES_COUNT AS WAREHOUSE_SPILLAGE,
COALESCE(q.QUERY_SPILLAGE, 0) AS DRILLDOWN_SPILLAGE,
w.SPILLAGE_QUERIES_COUNT - COALESCE(q.QUERY_SPILLAGE, 0) AS DIFFERENCE
FROM (
SELECT WAREHOUSE_NAME, SUM(SPILLAGE_QUERIES_COUNT) AS SPILLAGE_QUERIES_COUNT
FROM POLARGOVERN_TEST.PUBLIC.ALL_WH_KPI_HISTORY_DATA
WHERE KPI_DATE BETWEEN '2025-08-13' AND '2025-08-19'
GROUP BY WAREHOUSE_NAME
) w
LEFT JOIN (
SELECT WAREHOUSE_NAME, SUM(SPILLAGE_QUERIES_COUNT) AS QUERY_SPILLAGE
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
WHERE QUERY_DATE BETWEEN '2025-08-13' AND '2025-08-19'
GROUP BY WAREHOUSE_NAME
) q ON w.WAREHOUSE_NAME = q.WAREHOUSE_NAME
ORDER BY ABS(DIFFERENCE) DESC;
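If the weekly totals above disagree, it helps to narrow the mismatch down to a specific day before digging into individual queries. Below is a minimal sketch (not part of the original dashboard code) that runs a per-day variant of the same reconciliation from Python with snowflake-connector-python. It assumes ALL_WH_KPI_HISTORY_DATA stores one row per warehouse per KPI_DATE; the connection parameters are placeholder environment variables and DAILY_RECONCILIATION_SQL is an illustrative name.

```python
import os
import snowflake.connector

# Per-day reconciliation of spillage counts between the warehouse KPI table and
# the drill-down table. Table and column names come from the queries in this post.
DAILY_RECONCILIATION_SQL = """
SELECT
    w.KPI_DATE,
    SUM(w.SPILLAGE_QUERIES_COUNT) AS WAREHOUSE_SPILLAGE,
    COALESCE(SUM(q.QUERY_SPILLAGE), 0) AS DRILLDOWN_SPILLAGE
FROM POLARGOVERN_TEST.PUBLIC.ALL_WH_KPI_HISTORY_DATA w
LEFT JOIN (
    SELECT QUERY_DATE, WAREHOUSE_NAME, SUM(SPILLAGE_QUERIES_COUNT) AS QUERY_SPILLAGE
    FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
    WHERE QUERY_DATE BETWEEN '2025-08-13' AND '2025-08-19'
    GROUP BY QUERY_DATE, WAREHOUSE_NAME
) q
  ON q.QUERY_DATE = w.KPI_DATE
 AND q.WAREHOUSE_NAME = w.WAREHOUSE_NAME
WHERE w.KPI_DATE BETWEEN '2025-08-13' AND '2025-08-19'
GROUP BY w.KPI_DATE
ORDER BY w.KPI_DATE
"""

conn = snowflake.connector.connect(
    account=os.environ["SNOWFLAKE_ACCOUNT"],   # placeholder credentials
    user=os.environ["SNOWFLAKE_USER"],
    password=os.environ["SNOWFLAKE_PASSWORD"],
    warehouse=os.environ["SNOWFLAKE_WAREHOUSE"],
)
try:
    cur = conn.cursor()
    cur.execute(DAILY_RECONCILIATION_SQL)
    for kpi_date, wh_spillage, drill_spillage in cur:
        status = "OK" if wh_spillage == drill_spillage else "MISMATCH"
        print(f"{kpi_date}: warehouse={wh_spillage} drilldown={drill_spillage} -> {status}")
finally:
    conn.close()
```

Any date flagged MISMATCH is where the two pipelines diverge, which usually points at a date-boundary or deduplication difference rather than the KPI logic itself.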
# 🔥 FIXED: Use the warehouse procedure table for all warehouse data
SQL_ALL_WAREHOUSES = """
SELECT
WAREHOUSE_NAME,
MAX(SIZE) AS SIZE,
MAX(AUTO_SUSPEND) AS AUTO_SUSPEND,
MIN(MIN_CLUSTER_COUNT) AS MIN_CLUSTER_COUNT,
MAX(MAX_CLUSTER_COUNT) AS MAX_CLUSTER_COUNT,
MAX(SCALING_POLICY) AS SCALING_POLICY,
SUM(QUERIES_LT_1S) AS QUERIES_LT_1S,
SUM(QUERIES_1_5S) AS QUERIES_1_5S,
SUM(QUERIES_6_60S) AS QUERIES_6_60S,
SUM(QUERIES_61_300S) AS QUERIES_61_300S,
SUM(QUERIES_GT_300S) AS QUERIES_GT_300S,
SUM(UTIL_0_20_COUNT) AS UTIL_0_20_COUNT,
SUM(UTIL_20_40_COUNT) AS UTIL_20_40_COUNT,
SUM(UTIL_40_60_COUNT) AS UTIL_40_60_COUNT,
SUM(UTIL_60_80_COUNT) AS UTIL_60_80_COUNT,
SUM(UTIL_80_100_COUNT) AS UTIL_80_100_COUNT,
SUM(QUEUED_QUERIES_COUNT) AS QUEUED_QUERIES_COUNT,
SUM(SPILLAGE_QUERIES_COUNT) AS SPILLAGE_QUERIES_COUNT,
SUM(BLOCKED_TXN_COUNT) AS BLOCKED_TXN_COUNT,
SUM(FAILED_QUERIES_COUNT) AS FAILED_QUERIES_COUNT,
SUM(INCIDENT_QUERIES_COUNT) AS INCIDENT_QUERIES_COUNT,
SUM(CREDITS_USED) AS CREDITS_USED
FROM POLARGOVERN_TEST.PUBLIC.ALL_WH_KPI_HISTORY_DATA
WHERE KPI_DATE >= TO_DATE('2025-08-13')
AND KPI_DATE <= TO_DATE('2025-08-19')
GROUP BY WAREHOUSE_NAME
ORDER BY WAREHOUSE_NAME
"""
# FIXED: Warehouse KPI drill-down - Now correctly filters and groups
SQL_WAREHOUSE_KPI_DRILL = """
SELECT
USER_NAME,
WAREHOUSE_NAME,
COUNT(*) AS QUERY_COUNT,
MIN(START_TIME) AS FIRST_QUERY_TIME,
MAX(START_TIME) AS LAST_QUERY_TIME,
AVG(TOTAL_ELAPSED_TIME_SECONDS) AS AVG_DURATION_SECONDS,
SUM(CREDITS_USED) AS TOTAL_CREDITS,
COUNT(CASE WHEN EXECUTION_STATUS = 'SUCCESS' THEN 1 END) AS SUCCESSFUL_QUERIES,
SUM(FAILED_QUERIES_COUNT) AS FAILED_QUERIES -- align with the warehouse KPI definition (ERROR_CODE IS NOT NULL) instead of relying on specific EXECUTION_STATUS strings
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
WHERE WAREHOUSE_NAME = %(warehouse_name)s
AND QUERY_DATE >= TO_DATE('2025-08-13')
AND QUERY_DATE <= TO_DATE('2025-08-19')
AND {kpi_column} = 1 -- This filters to only queries that match the KPI
GROUP BY USER_NAME, WAREHOUSE_NAME
ORDER BY QUERY_COUNT DESC
LIMIT 1000
"""
# User KPI drill-down - Returns individual queries as expected
SQL_USER_KPI_DRILL = """
SELECT *
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
WHERE USER_NAME = %(username)s
AND QUERY_DATE >= TO_DATE('2025-08-13')
AND QUERY_DATE <= TO_DATE('2025-08-19')
AND {kpi_column} = 1
ORDER BY START_TIME DESC
LIMIT 1000
"""
# User-Warehouse KPI drill-down - Returns individual queries for specific user in warehouse
SQL_USER_WAREHOUSE_KPI_DRILL = """
SELECT *
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
WHERE USER_NAME = %(username)s
AND WAREHOUSE_NAME = %(warehouse_name)s
AND QUERY_DATE >= TO_DATE('2025-08-13')
AND QUERY_DATE <= TO_DATE('2025-08-19')
AND {kpi_column} = 1
ORDER BY START_TIME DESC
LIMIT 1000
"""
SQL_GET_QUERY_DETAILS = """
SELECT *
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
WHERE QUERY_ID = %(query_id)s
"""
CREATE OR REPLACE TABLE query_360_table
CLUSTER BY (WAREHOUSE_NAME, DATE(START_TIME), EXECUTION_STATUS)
AS
WITH
qh_base AS (
SELECT
q.*,
ROW_NUMBER() OVER (PARTITION BY q.QUERY_ID ORDER BY q.START_TIME DESC) AS rn
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY q
WHERE START_TIME >= TO_TIMESTAMP('2025-08-13')
AND START_TIME < TO_TIMESTAMP('2025-08-20') -- exclusive upper bound so the full final day (2025-08-19) is included
AND q.WAREHOUSE_NAME IS NOT NULL
),
qh AS (
SELECT *
FROM qh_base
WHERE rn = 1
)
SELECT
-- 🔥 CORE DETAILS - Only what's needed for drill-down
qh.QUERY_ID,
qh.QUERY_HASH,
qh.SESSION_ID,
qh.TRANSACTION_ID,
qh.QUERY_TAG,
qh.USER_NAME,
qh.ROLE_NAME,
qh.ROLE_TYPE,
qh.USER_TYPE,
-- TIME DIMENSIONS
qh.START_TIME,
qh.END_TIME,
DATE(qh.START_TIME) AS QUERY_DATE,
HOUR(qh.START_TIME) AS QUERY_HOUR,
DAYOFWEEKISO(qh.START_TIME) AS DAY_OF_WEEK,
DAYNAME(qh.START_TIME) AS DAY_NAME,
-- 🔥 WAREHOUSE INFO - identifiers plus the size/type context used for credit estimation
qh.WAREHOUSE_ID,
qh.WAREHOUSE_NAME,
qh.WAREHOUSE_SIZE,
qh.WAREHOUSE_TYPE,
qh.CLUSTER_NUMBER,
-- PERFORMANCE TIMES (SECONDS)
ROUND(COALESCE(qh.TOTAL_ELAPSED_TIME,0) / 1000, 3) AS TOTAL_ELAPSED_TIME_SECONDS,
ROUND(COALESCE(qh.EXECUTION_TIME,0) / 1000, 3) AS EXECUTION_TIME_SECONDS,
ROUND(COALESCE(qh.COMPILATION_TIME,0) / 1000, 3) AS COMPILATION_TIME_SECONDS,
ROUND(COALESCE(qh.QUEUED_PROVISIONING_TIME,0) / 1000, 3) AS QUEUED_PROVISIONING_TIME_SECONDS,
ROUND(COALESCE(qh.QUEUED_REPAIR_TIME,0) / 1000, 3) AS QUEUED_REPAIR_TIME_SECONDS,
ROUND(COALESCE(qh.QUEUED_OVERLOAD_TIME,0) / 1000, 3) AS QUEUED_OVERLOAD_TIME_SECONDS,
-- 🔥 DURATION BUCKETS - EXACT MATCH with your warehouse procedure
CASE WHEN COALESCE(qh.TOTAL_ELAPSED_TIME,0) < 1000 THEN 1 ELSE 0 END AS QUERIES_LT_1S,
CASE WHEN COALESCE(qh.TOTAL_ELAPSED_TIME,0) BETWEEN 1000 AND 5000 THEN 1 ELSE 0 END AS QUERIES_1_5S,
CASE WHEN COALESCE(qh.TOTAL_ELAPSED_TIME,0) BETWEEN 5001 AND 60000 THEN 1 ELSE 0 END AS QUERIES_6_60S,
CASE WHEN COALESCE(qh.TOTAL_ELAPSED_TIME,0) BETWEEN 60001 AND 300000 THEN 1 ELSE 0 END AS QUERIES_61_300S,
CASE WHEN COALESCE(qh.TOTAL_ELAPSED_TIME,0) > 300000 THEN 1 ELSE 0 END AS QUERIES_GT_300S,
-- 🔥 KPI FLAGS - EXACT MATCH with your warehouse procedure logic
-- 1) Queued queries - EXACT MATCH with procedure
CASE WHEN (
COALESCE(qh.QUEUED_OVERLOAD_TIME,0) > 0
OR COALESCE(qh.QUEUED_PROVISIONING_TIME,0) > 0
OR COALESCE(qh.QUEUED_REPAIR_TIME,0) > 0
) THEN 1 ELSE 0 END AS QUEUED_QUERIES_COUNT,
-- 2) Spillage queries - EXACT MATCH with procedure
CASE WHEN (
COALESCE(qh.BYTES_SPILLED_TO_LOCAL_STORAGE,0) > 0
OR COALESCE(qh.BYTES_SPILLED_TO_REMOTE_STORAGE,0) > 0
) THEN 1 ELSE 0 END AS SPILLAGE_QUERIES_COUNT,
-- 3) Blocked transactions - EXACT MATCH with procedure
CASE WHEN qh.TRANSACTION_BLOCKED_TIME IS NOT NULL THEN 1 ELSE 0 END AS BLOCKED_TXN_COUNT,
-- 4) Failed queries - EXACT MATCH with procedure
CASE WHEN qh.ERROR_CODE IS NOT NULL THEN 1 ELSE 0 END AS FAILED_QUERIES_COUNT,
-- 5) Incident queries - EXACT MATCH with procedure
CASE WHEN (
qh.EXECUTION_STATUS = 'INCIDENT'
OR qh.QUERY_TAG ILIKE '%incident%'
) THEN 1 ELSE 0 END AS INCIDENT_QUERIES_COUNT,
-- 🔥 Additional KPI flags for drill-down (not used in warehouse aggregation)
CASE WHEN (
COALESCE(qh.BYTES_SPILLED_TO_LOCAL_STORAGE,0) > 0
OR COALESCE(qh.BYTES_SPILLED_TO_REMOTE_STORAGE,0) > 0
) THEN 1 ELSE 0 END AS SPILLED_QUERIES,
CASE WHEN (
qh.WAREHOUSE_SIZE IN ('Medium','Large','X-Large','2X-Large','3X-Large','4X-Large','5X-Large','6X-Large')
AND COALESCE(qh.PERCENTAGE_SCANNED_FROM_CACHE,0) > 80
AND COALESCE(qh.TOTAL_ELAPSED_TIME,0) / 1000 > 30
) THEN 1 ELSE 0 END AS OVER_PROVISIONED_QUERIES,
CASE WHEN (
HOUR(qh.START_TIME) BETWEEN 9 AND 17
AND DAYOFWEEKISO(qh.START_TIME) BETWEEN 1 AND 5
AND COALESCE(qh.TOTAL_ELAPSED_TIME,0) / 1000 > 300
) THEN 1 ELSE 0 END AS PEAK_HOUR_LONG_RUNNING_QUERIES,
CASE WHEN REGEXP_LIKE(qh.QUERY_TEXT, '.*SELECT\\s*\\*.*', 'is') THEN 1 ELSE 0 END AS SELECT_STAR_QUERIES, -- REGEXP_LIKE anchors to the full string, so wrap the pattern in .* ('s' lets . span newlines)
CASE WHEN (
COALESCE(qh.PARTITIONS_SCANNED,0) = COALESCE(qh.PARTITIONS_TOTAL,0)
AND COALESCE(qh.PARTITIONS_TOTAL,0) > 1
AND COALESCE(qh.BYTES_SCANNED,0) > POWER(1024,3)
) THEN 1 ELSE 0 END AS UNPARTITIONED_SCAN_QUERIES,
CASE WHEN COUNT(*) OVER (PARTITION BY qh.QUERY_HASH) > 1 AND qh.QUERY_HASH IS NOT NULL THEN 1 ELSE 0 END AS REPEATED_QUERIES,
CASE WHEN (
(LENGTH(qh.QUERY_TEXT) - LENGTH(REPLACE(UPPER(qh.QUERY_TEXT), 'JOIN', ''))) / 4 > 3
OR UPPER(qh.QUERY_TEXT) LIKE '%WINDOW%'
OR UPPER(qh.QUERY_TEXT) LIKE '%WITH%'
) THEN 1 ELSE 0 END AS COMPLEX_JOIN_QUERIES,
CASE WHEN (
COALESCE(qh.ROWS_PRODUCED,0) = 0
AND UPPER(qh.QUERY_TEXT) LIKE 'SELECT%'
AND COALESCE(qh.TOTAL_ELAPSED_TIME,0) / 1000 > 5
) THEN 1 ELSE 0 END AS ZERO_RESULT_QUERIES,
CASE WHEN COALESCE(qh.COMPILATION_TIME,0) / 1000 > 10 THEN 1 ELSE 0 END AS HIGH_COMPILE_QUERIES,
-- Credits calculation for individual queries
CASE
WHEN COALESCE(qh.EXECUTION_TIME,0) = 0 THEN 0
ELSE ROUND(
(qh.EXECUTION_TIME::FLOAT / 3600000) *
CASE qh.WAREHOUSE_SIZE
WHEN 'X-Small' THEN 1
WHEN 'Small' THEN 2
WHEN 'Medium' THEN 4
WHEN 'Large' THEN 8
WHEN 'X-Large' THEN 16
WHEN '2X-Large' THEN 32
WHEN '3X-Large' THEN 64
WHEN '4X-Large' THEN 128
WHEN '5X-Large' THEN 256
WHEN '6X-Large' THEN 512
ELSE 1
END
, 6)
END AS CREDITS_USED,
-- Essential columns for drill-down details
qh.BYTES_SCANNED,
qh.PERCENTAGE_SCANNED_FROM_CACHE,
qh.ROWS_PRODUCED,
qh.PARTITIONS_SCANNED,
qh.PARTITIONS_TOTAL,
COALESCE(qh.BYTES_SPILLED_TO_LOCAL_STORAGE, 0) AS BYTES_SPILLED_LOCAL,
COALESCE(qh.BYTES_SPILLED_TO_REMOTE_STORAGE, 0) AS BYTES_SPILLED_REMOTE,
qh.EXECUTION_STATUS,
qh.ERROR_CODE,
qh.ERROR_MESSAGE,
LEFT(qh.QUERY_TEXT, 2000) AS QUERY_TEXT_SAMPLE,
qh.DATABASE_NAME,
qh.SCHEMA_NAME,
qh.QUERY_TYPE
FROM qh;
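After rebuilding query_360_table, a quick per-day sanity check catches the most common mistake: an upper bound on START_TIME that cuts the final day short. A minimal sketch, using the same placeholder connection pattern as above:

```python
import os
import snowflake.connector

# Post-build sanity check: row count and KPI-flag totals per day. A missing or
# suspiciously small final day usually means the START_TIME window was truncated.
SANITY_SQL = """
SELECT
    QUERY_DATE,
    COUNT(*) AS QUERIES,
    SUM(SPILLAGE_QUERIES_COUNT) AS SPILLED,
    SUM(FAILED_QUERIES_COUNT) AS FAILED
FROM POLARSLED_DB.DEMO_UPLOAD.query_360_table
GROUP BY QUERY_DATE
ORDER BY QUERY_DATE
"""

conn = snowflake.connector.connect(
    account=os.environ["SNOWFLAKE_ACCOUNT"],   # placeholder credentials
    user=os.environ["SNOWFLAKE_USER"],
    password=os.environ["SNOWFLAKE_PASSWORD"],
)
try:
    for query_date, queries, spilled, failed in conn.cursor().execute(SANITY_SQL):
        print(f"{query_date}: {queries} queries, {spilled} spilled, {failed} failed")
finally:
    conn.close()
```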