
Armaan Khan


Prompt

I will share my queries and requirements. You have to create a React frontend with Flask as the backend; we have to run queries against Snowflake, pass the data to React, and then create an interactive table based on it. This is my warehouse query:

CREATE OR REPLACE TABLE WAREHOUSE_ANALYTICS_DASHBOARD_with_queries AS

WITH warehouse_info AS (

    -- Get warehouse metadata

    SELECT DISTINCT

        wh.WAREHOUSE_ID,

        wh.WAREHOUSE_NAME,

        wh.SIZE,

        wh.WAREHOUSE_TYPE,

        wh.CLUSTER_COUNT,

        NULL as SUSPEND_POLICY,

        NULL as MIN_CLUSTER_COUNT,

        NULL as MAX_CLUSTER_COUNT

    FROM SNOWFLAKE.ACCOUNT_USAGE.WAREHOUSE_EVENTS_HISTORY wh

    WHERE wh.TIMESTAMP >= CURRENT_DATE - 1

),



query_buckets AS (

    SELECT

        q.WAREHOUSE_ID,

        q.WAREHOUSE_NAME,

        q.QUERY_ID,

        q.TOTAL_ELAPSED_TIME,

        q.EXECUTION_STATUS,

        q.CREDITS_USED_CLOUD_SERVICES,

        q.BYTES_SPILLED_TO_LOCAL_STORAGE,

        q.BYTES_SPILLED_TO_REMOTE_STORAGE,

        q.QUERY_TYPE,



        CASE

            WHEN q.TOTAL_ELAPSED_TIME <= 10000 THEN '1-10 seconds'

            WHEN q.TOTAL_ELAPSED_TIME <= 20000 THEN '10-20 seconds'

            WHEN q.TOTAL_ELAPSED_TIME <= 60000 THEN '20-60 seconds'

            WHEN q.TOTAL_ELAPSED_TIME <= 180000 THEN '1-3 minutes'

            WHEN q.TOTAL_ELAPSED_TIME <= 300000 THEN '3-5 minutes'

            ELSE '5+ minutes'

        END as DURATION_BUCKET,



        CASE

            WHEN (q.QUEUED_PROVISIONING_TIME + q.QUEUED_REPAIR_TIME + q.QUEUED_OVERLOAD_TIME) <= 120000 THEN '1-2 minutes'

            WHEN (q.QUEUED_PROVISIONING_TIME + q.QUEUED_REPAIR_TIME + q.QUEUED_OVERLOAD_TIME) <= 300000 THEN '2-5 minutes'

            WHEN (q.QUEUED_PROVISIONING_TIME + q.QUEUED_REPAIR_TIME + q.QUEUED_OVERLOAD_TIME) <= 600000 THEN '5-10 minutes'

            WHEN (q.QUEUED_PROVISIONING_TIME + q.QUEUED_REPAIR_TIME + q.QUEUED_OVERLOAD_TIME) <= 1200000 THEN '10-20 minutes'

            ELSE '20+ minutes'

        END as QUEUED_BUCKET,



        CASE

            WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.2 THEN '0-20 cents'

            WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.4 THEN '20-40 cents'

            WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.6 THEN '40-60 cents'

            WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.8 THEN '60-80 cents'

            WHEN q.CREDITS_USED_CLOUD_SERVICES <= 1.0 THEN '80-100 cents'

            ELSE '100+ cents'

        END as CREDIT_UTILIZATION_BUCKET



    FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY q

    WHERE q.START_TIME >= CURRENT_DATE - 1

        AND q.WAREHOUSE_ID IS NOT NULL

        AND q.USER_NAME NOT IN ('SNOWFLAKE', 'SNOWFLAKE_MONITOR')

        AND q.QUERY_TYPE NOT IN ('SHOW', 'DESCRIBE', 'USE', 'CREATE', 'DROP', 'ALTER')

        AND q.QUERY_TEXT IS NOT NULL

),



warehouse_metrics AS (

    SELECT

        wm.WAREHOUSE_ID,

        wm.WAREHOUSE_NAME,

        SUM(wm.CREDITS_USED) as TOTAL_CREDITS_USED,

        SUM(wm.CREDITS_USED_COMPUTE) as TOTAL_COMPUTE_CREDITS,

        SUM(wm.CREDITS_USED_CLOUD_SERVICES) as TOTAL_CLOUD_SERVICES_CREDITS

    FROM SNOWFLAKE.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY wm

    WHERE wm.START_TIME >= CURRENT_DATE - 1

    GROUP BY wm.WAREHOUSE_ID, wm.WAREHOUSE_NAME

)



SELECT

    COALESCE(wi.WAREHOUSE_ID, qb.WAREHOUSE_ID, wm.WAREHOUSE_ID) as WAREHOUSE_ID,

    COALESCE(wi.WAREHOUSE_NAME, qb.WAREHOUSE_NAME, wm.WAREHOUSE_NAME) as WAREHOUSE_NAME,

    wi.SIZE as WAREHOUSE_SIZE,

    wi.WAREHOUSE_TYPE,

    wi.CLUSTER_COUNT,

    wi.SUSPEND_POLICY,

    wi.MIN_CLUSTER_COUNT,

    wi.MAX_CLUSTER_COUNT,



    COUNT(CASE WHEN qb.DURATION_BUCKET = '1-10 seconds' THEN 1 END) as QUERIES_1_10_SEC,

    COUNT(CASE WHEN qb.DURATION_BUCKET = '10-20 seconds' THEN 1 END) as QUERIES_10_20_SEC,

    COUNT(CASE WHEN qb.DURATION_BUCKET = '20-60 seconds' THEN 1 END) as QUERIES_20_60_SEC,

    COUNT(CASE WHEN qb.DURATION_BUCKET = '1-3 minutes' THEN 1 END) as QUERIES_1_3_MIN,

    COUNT(CASE WHEN qb.DURATION_BUCKET = '3-5 minutes' THEN 1 END) as QUERIES_3_5_MIN,

    COUNT(CASE WHEN qb.DURATION_BUCKET = '5+ minutes' THEN 1 END) as QUERIES_5_PLUS_MIN,



    COUNT(CASE WHEN qb.QUEUED_BUCKET = '1-2 minutes' THEN 1 END) as QUEUED_1_2_MIN,

    COUNT(CASE WHEN qb.QUEUED_BUCKET = '2-5 minutes' THEN 1 END) as QUEUED_2_5_MIN,

    COUNT(CASE WHEN qb.QUEUED_BUCKET = '5-10 minutes' THEN 1 END) as QUEUED_5_10_MIN,

    COUNT(CASE WHEN qb.QUEUED_BUCKET = '10-20 minutes' THEN 1 END) as QUEUED_10_20_MIN,

    COUNT(CASE WHEN qb.QUEUED_BUCKET = '20+ minutes' THEN 1 END) as QUEUED_20_PLUS_MIN,



    COUNT(CASE WHEN qb.BYTES_SPILLED_TO_LOCAL_STORAGE > 0 THEN 1 END) as QUERIES_SPILLED_LOCAL,

    COUNT(CASE WHEN qb.BYTES_SPILLED_TO_REMOTE_STORAGE > 0 THEN 1 END) as QUERIES_SPILLED_REMOTE,

    SUM(qb.BYTES_SPILLED_TO_LOCAL_STORAGE) as TOTAL_BYTES_SPILLED_LOCAL,

    SUM(qb.BYTES_SPILLED_TO_REMOTE_STORAGE) as TOTAL_BYTES_SPILLED_REMOTE,



    COUNT(CASE WHEN qb.EXECUTION_STATUS = 'FAIL' THEN 1 END) as FAILED_QUERIES,

    COUNT(CASE WHEN qb.EXECUTION_STATUS = 'SUCCESS' THEN 1 END) as SUCCESSFUL_QUERIES,

    COUNT(CASE WHEN qb.EXECUTION_STATUS = 'RUNNING' THEN 1 END) as RUNNING_QUERIES,



    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '0-20 cents' THEN 1 END) as QUERIES_0_20_CENTS,

    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '20-40 cents' THEN 1 END) as QUERIES_20_40_CENTS,

    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '40-60 cents' THEN 1 END) as QUERIES_40_60_CENTS,

    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '60-80 cents' THEN 1 END) as QUERIES_60_80_CENTS,

    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '80-100 cents' THEN 1 END) as QUERIES_80_100_CENTS,

    COUNT(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '100+ cents' THEN 1 END) as QUERIES_100_PLUS_CENTS,



    -- New column: query_ids

    OBJECT_CONSTRUCT(

        '1-10_sec_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '1-10 seconds' THEN qb.QUERY_ID END),

        '10-20_sec_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '10-20 seconds' THEN qb.QUERY_ID END),

        '20-60_sec_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '20-60 seconds' THEN qb.QUERY_ID END),

        '1-3_min_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '1-3 minutes' THEN qb.QUERY_ID END),

        '3-5_min_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '3-5 minutes' THEN qb.QUERY_ID END),

        '5_plus_min_ids', ARRAY_AGG(CASE WHEN qb.DURATION_BUCKET = '5+ minutes' THEN qb.QUERY_ID END),

        'queued_1-2_min_ids', ARRAY_AGG(CASE WHEN qb.QUEUED_BUCKET = '1-2 minutes' THEN qb.QUERY_ID END),

        'queued_2-5_min_ids', ARRAY_AGG(CASE WHEN qb.QUEUED_BUCKET = '2-5 minutes' THEN qb.QUERY_ID END),

        'queued_5-10_min_ids', ARRAY_AGG(CASE WHEN qb.QUEUED_BUCKET = '5-10 minutes' THEN qb.QUERY_ID END),

        'queued_10-20_min_ids', ARRAY_AGG(CASE WHEN qb.QUEUED_BUCKET = '10-20 minutes' THEN qb.QUERY_ID END),

        'queued_20_plus_min_ids', ARRAY_AGG(CASE WHEN qb.QUEUED_BUCKET = '20+ minutes' THEN qb.QUERY_ID END),

        'spilled_local_ids', ARRAY_AGG(CASE WHEN qb.BYTES_SPILLED_TO_LOCAL_STORAGE > 0 THEN qb.QUERY_ID END),

        'spilled_remote_ids', ARRAY_AGG(CASE WHEN qb.BYTES_SPILLED_TO_REMOTE_STORAGE > 0 THEN qb.QUERY_ID END),

        'failed_queries_ids', ARRAY_AGG(CASE WHEN qb.EXECUTION_STATUS = 'FAIL' THEN qb.QUERY_ID END),

        'successful_queries_ids', ARRAY_AGG(CASE WHEN qb.EXECUTION_STATUS = 'SUCCESS' THEN qb.QUERY_ID END),

        'running_queries_ids', ARRAY_AGG(CASE WHEN qb.EXECUTION_STATUS = 'RUNNING' THEN qb.QUERY_ID END),

        'credit_0-20_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '0-20 cents' THEN qb.QUERY_ID END),

        'credit_20-40_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '20-40 cents' THEN qb.QUERY_ID END),

        'credit_40-60_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '40-60 cents' THEN qb.QUERY_ID END),

        'credit_60-80_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '60-80 cents' THEN qb.QUERY_ID END),

        'credit_80-100_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '80-100 cents' THEN qb.QUERY_ID END),

        'credit_100_plus_cents_ids', ARRAY_AGG(CASE WHEN qb.CREDIT_UTILIZATION_BUCKET = '100+ cents' THEN qb.QUERY_ID END)

    ) as QUERY_IDS,



    COUNT(qb.QUERY_ID) as TOTAL_QUERIES,

    COALESCE(wm.TOTAL_CREDITS_USED, 0) as TOTAL_CREDITS_USED,

    COALESCE(wm.TOTAL_COMPUTE_CREDITS, 0) as TOTAL_COMPUTE_CREDITS,

    COALESCE(wm.TOTAL_CLOUD_SERVICES_CREDITS, 0) as TOTAL_CLOUD_SERVICES_CREDITS,



    CURRENT_TIMESTAMP as ANALYSIS_TIMESTAMP,

    CURRENT_DATE - 1 as ANALYSIS_DATE



FROM warehouse_info wi

FULL OUTER JOIN query_buckets qb

    ON wi.WAREHOUSE_ID = qb.WAREHOUSE_ID

FULL OUTER JOIN warehouse_metrics wm

    ON COALESCE(wi.WAREHOUSE_ID, qb.WAREHOUSE_ID) = wm.WAREHOUSE_ID



GROUP BY

    COALESCE(wi.WAREHOUSE_ID, qb.WAREHOUSE_ID, wm.WAREHOUSE_ID),

    COALESCE(wi.WAREHOUSE_NAME, qb.WAREHOUSE_NAME, wm.WAREHOUSE_NAME),

    wi.SIZE,

    wi.WAREHOUSE_TYPE,

    wi.CLUSTER_COUNT,

    wi.SUSPEND_POLICY,

    wi.MIN_CLUSTER_COUNT,

    wi.MAX_CLUSTER_COUNT,

    wm.TOTAL_CREDITS_USED,

    wm.TOTAL_COMPUTE_CREDITS,

    wm.TOTAL_CLOUD_SERVICES_CREDITS



ORDER BY TOTAL_QUERIES DESC;
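
The QUERY_IDS object built with OBJECT_CONSTRUCT and ARRAY_AGG above is what makes each count clickable later on: every bucket column has a matching array of the query IDs it counted (Snowflake's ARRAY_AGG ignores the NULLs that the CASE expressions produce for non-matching rows). Here is a minimal sketch of reading that VARIANT column back through a Snowflake cursor, assuming snowflake-connector-python; the function name is illustrative, not part of the prompt:

import json

def fetch_warehouse_dashboard(cursor):
    """Run the dashboard table and decode its QUERY_IDS VARIANT column,
    which the connector returns as a JSON string."""
    cursor.execute("SELECT * FROM WAREHOUSE_ANALYTICS_DASHBOARD_with_queries")
    columns = [col[0] for col in cursor.description]
    rows = []
    for record in cursor.fetchall():
        row = dict(zip(columns, record))
        # Decode so the API can hand React a real mapping of
        # bucket name -> list of query IDs for that cell.
        if isinstance(row.get("QUERY_IDS"), str):
            row["QUERY_IDS"] = json.loads(row["QUERY_IDS"])
        rows.append(row)
    return rows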

And this is the queries query:

CREATE OR REPLACE TABLE QUERY_HISTORY_SUMMARY AS

SELECT 

    -- Query Identification

    q.QUERY_ID,

    q.QUERY_HASH,

    q.QUERY_PARAMETERIZED_HASH,

    LEFT(q.QUERY_TEXT, 100) as QUERY_TEXT_PREVIEW,

    q.QUERY_TYPE,

    q.QUERY_TAG,



    -- Timing Information

    q.START_TIME,

    q.END_TIME,

    q.TOTAL_ELAPSED_TIME,

    q.COMPILATION_TIME,

    q.EXECUTION_TIME,



    -- User and Session Info

    q.USER_NAME,

    q.USER_TYPE,

    q.ROLE_NAME,

    q.ROLE_TYPE,

    q.SESSION_ID,



    -- Warehouse Information

    q.WAREHOUSE_ID,

    q.WAREHOUSE_NAME,

    q.WAREHOUSE_SIZE,

    q.WAREHOUSE_TYPE,

    q.CLUSTER_NUMBER,



    -- Database Context

    q.DATABASE_ID,

    q.DATABASE_NAME,

    q.SCHEMA_ID,

    q.SCHEMA_NAME,

    q.USER_DATABASE_NAME,

    q.USER_SCHEMA_NAME,



    -- Execution Status

    q.EXECUTION_STATUS,

    q.ERROR_CODE,

    LEFT(q.ERROR_MESSAGE, 200) as ERROR_MESSAGE_PREVIEW,



    -- Performance Metrics

    q.BYTES_SCANNED,

    q.PERCENTAGE_SCANNED_FROM_CACHE,

    q.BYTES_WRITTEN,

    q.ROWS_PRODUCED,

    q.ROWS_INSERTED,

    q.ROWS_UPDATED,

    q.ROWS_DELETED,



    -- Resource Usage

    q.CREDITS_USED_CLOUD_SERVICES,

    q.BYTES_SPILLED_TO_LOCAL_STORAGE,

    q.BYTES_SPILLED_TO_REMOTE_STORAGE,

    q.PARTITIONS_SCANNED,

    q.PARTITIONS_TOTAL,



    -- Queue Times

    q.QUEUED_PROVISIONING_TIME,

    q.QUEUED_REPAIR_TIME,

    q.QUEUED_OVERLOAD_TIME,

    q.TRANSACTION_BLOCKED_TIME,



    -- Classification Buckets for easy filtering

    CASE 

        WHEN q.TOTAL_ELAPSED_TIME <= 10000 THEN '1-10 seconds'

        WHEN q.TOTAL_ELAPSED_TIME <= 20000 THEN '10-20 seconds' 

        WHEN q.TOTAL_ELAPSED_TIME <= 60000 THEN '20-60 seconds'

        WHEN q.TOTAL_ELAPSED_TIME <= 180000 THEN '1-3 minutes'

        WHEN q.TOTAL_ELAPSED_TIME <= 300000 THEN '3-5 minutes'

        ELSE '5+ minutes'

    END as DURATION_BUCKET,



    CASE 

        WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.2 THEN '0-20 cents'

        WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.4 THEN '20-40 cents'

        WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.6 THEN '40-60 cents'

        WHEN q.CREDITS_USED_CLOUD_SERVICES <= 0.8 THEN '60-80 cents'

        WHEN q.CREDITS_USED_CLOUD_SERVICES <= 1.0 THEN '80-100 cents'

        ELSE '100+ cents'

    END as CREDIT_BUCKET,



    CASE 

        WHEN q.BYTES_SPILLED_TO_LOCAL_STORAGE > 0 OR q.BYTES_SPILLED_TO_REMOTE_STORAGE > 0 THEN 'SPILLED'

        ELSE 'NO_SPILL'

    END as SPILL_STATUS,



    CASE 

        WHEN (q.QUEUED_PROVISIONING_TIME + q.QUEUED_REPAIR_TIME + q.QUEUED_OVERLOAD_TIME) > 0 THEN 'QUEUED'

        ELSE 'NOT_QUEUED'

    END as QUEUE_STATUS,



    -- Analysis metadata

    CURRENT_TIMESTAMP as ANALYSIS_TIMESTAMP,

    CURRENT_DATE - 1 as ANALYSIS_DATE



FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY q

WHERE q.START_TIME >= CURRENT_DATE - 1

    AND q.QUERY_ID IS NOT NULL

ORDER BY q.START_TIME DESC;
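
This summary table is what the drill-down described at the end of the prompt filters against: given the query IDs behind a clicked bucket cell, look them up here and group by user. A minimal pandas sketch, assuming the table has already been cached to a local Parquet file (the file name and function name are illustrative assumptions):

import pandas as pd

def queries_by_user(query_ids, cache_path="query_history_summary.parquet"):
    # Hypothetical local cache written by the Flask layer described below.
    df = pd.read_parquet(cache_path)
    subset = df[df["QUERY_ID"].isin(query_ids)]
    # One row per user: how many of the clicked bucket's queries they ran
    # and how long those queries took in total.
    return (subset.groupby("USER_NAME")
                  .agg(QUERY_COUNT=("QUERY_ID", "count"),
                       TOTAL_ELAPSED_MS=("TOTAL_ELAPSED_TIME", "sum"))
                  .reset_index()
                  .sort_values("QUERY_COUNT", ascending=False))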



-- =====================================================

-- TABLE 2: QUERY_DETAILS_COMPLETE

-- This table provides complete details for any specific query

-- =====================================================



CREATE OR REPLACE TABLE QUERY_DETAILS_COMPLETE AS

WITH filtered_queries AS (

    SELECT

        q.*,

        -- Calculate partition scan percentage

        CASE

            WHEN q.PARTITIONS_TOTAL > 0 THEN

                ROUND((q.PARTITIONS_SCANNED::FLOAT / q.PARTITIONS_TOTAL::FLOAT) * 100, 2)

            ELSE 0

        END as PARTITION_SCAN_PERCENTAGE,

        

        -- Calculate compilation time percentage

        CASE

            WHEN q.TOTAL_ELAPSED_TIME > 0 THEN

                ROUND((q.COMPILATION_TIME::FLOAT / q.TOTAL_ELAPSED_TIME::FLOAT) * 100, 2)

            ELSE 0

        END as COMPILATION_TIME_PERCENTAGE,

        

        -- Calculate execution time percentage

        CASE

            WHEN q.TOTAL_ELAPSED_TIME > 0 THEN

                ROUND((q.EXECUTION_TIME::FLOAT / q.TOTAL_ELAPSED_TIME::FLOAT) * 100, 2)

            ELSE 0

        END as EXECUTION_TIME_PERCENTAGE,

        

        -- Calculate rows per MB scanned

        CASE

            WHEN q.BYTES_SCANNED > 0 THEN

                ROUND(q.ROWS_PRODUCED::FLOAT / (q.BYTES_SCANNED::FLOAT / 1024 / 1024), 2)

            ELSE 0

        END as ROWS_PER_MB_SCANNED,

        

        -- Classify performance

        CASE

            WHEN q.TOTAL_ELAPSED_TIME <= 1000 THEN 'VERY_FAST'

            WHEN q.TOTAL_ELAPSED_TIME <= 10000 THEN 'FAST'

            WHEN q.TOTAL_ELAPSED_TIME <= 60000 THEN 'MODERATE'

            WHEN q.TOTAL_ELAPSED_TIME <= 300000 THEN 'SLOW'

            ELSE 'VERY_SLOW'

        END as PERFORMANCE_CATEGORY,

        

        -- Classify cache efficiency

        CASE

            WHEN q.PERCENTAGE_SCANNED_FROM_CACHE >= 90 THEN 'HIGH_CACHE_HIT'

            WHEN q.PERCENTAGE_SCANNED_FROM_CACHE >= 50 THEN 'MEDIUM_CACHE_HIT'

            WHEN q.PERCENTAGE_SCANNED_FROM_CACHE > 0 THEN 'LOW_CACHE_HIT'

            ELSE 'NO_CACHE_HIT'

        END as CACHE_EFFICIENCY,

        

        -- Classify spilling

        CASE

            WHEN q.BYTES_SPILLED_TO_LOCAL_STORAGE > 0 AND q.BYTES_SPILLED_TO_REMOTE_STORAGE > 0 THEN 'BOTH_SPILL'

            WHEN q.BYTES_SPILLED_TO_REMOTE_STORAGE > 0 THEN 'REMOTE_SPILL'

            WHEN q.BYTES_SPILLED_TO_LOCAL_STORAGE > 0 THEN 'LOCAL_SPILL'

            ELSE 'NO_SPILL'

        END as SPILL_CLASSIFICATION

        

    FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY q

    WHERE q.START_TIME >= CURRENT_DATE - 1

        AND q.QUERY_ID IS NOT NULL

        AND q.USER_NAME NOT IN ('SNOWFLAKE', 'SNOWFLAKE_MONITOR')

        AND q.QUERY_TYPE NOT IN ('SHOW', 'DESCRIBE', 'USE', 'CREATE', 'DROP', 'ALTER')

)

SELECT 

    -- Core Query Information

    QUERY_ID,

    QUERY_TEXT,

    QUERY_HASH,

    QUERY_HASH_VERSION,

    QUERY_PARAMETERIZED_HASH,

    QUERY_PARAMETERIZED_HASH_VERSION,

    QUERY_TYPE,

    QUERY_TAG,



    -- Timing Details (all in milliseconds)

    START_TIME,

    END_TIME,

    TOTAL_ELAPSED_TIME,

    COMPILATION_TIME,

    EXECUTION_TIME,

    QUEUED_PROVISIONING_TIME,

    QUEUED_REPAIR_TIME,

    QUEUED_OVERLOAD_TIME,

    TRANSACTION_BLOCKED_TIME,

    CHILD_QUERIES_WAIT_TIME,

    QUERY_RETRY_TIME,

    QUERY_RETRY_CAUSE,

    FAULT_HANDLING_TIME,

    LIST_EXTERNAL_FILES_TIME,



    -- User and Authentication

    USER_NAME,

    USER_TYPE,

    ROLE_NAME,

    ROLE_TYPE,

    SECONDARY_ROLE_STATS,

    SESSION_ID,



    -- Warehouse and Compute

    WAREHOUSE_ID,

    WAREHOUSE_NAME,

    WAREHOUSE_SIZE,

    WAREHOUSE_TYPE,

    CLUSTER_NUMBER,

    QUERY_LOAD_PERCENT,



    -- Database Context

    DATABASE_ID,

    DATABASE_NAME,

    SCHEMA_ID,

    SCHEMA_NAME,

    USER_DATABASE_ID,

    USER_DATABASE_NAME,

    USER_SCHEMA_ID,

    USER_SCHEMA_NAME,



    -- Execution Results

    EXECUTION_STATUS,

    ERROR_CODE,

    ERROR_MESSAGE,

    IS_CLIENT_GENERATED_STATEMENT,



    -- Data Processing Metrics

    BYTES_SCANNED,

    PERCENTAGE_SCANNED_FROM_CACHE,

    BYTES_WRITTEN,

    BYTES_WRITTEN_TO_RESULT,

    BYTES_READ_FROM_RESULT,

    ROWS_PRODUCED,

    ROWS_WRITTEN_TO_RESULT,

    ROWS_INSERTED,

    ROWS_UPDATED,

    ROWS_DELETED,

    ROWS_UNLOADED,

    BYTES_DELETED,



    -- Partitioning

    PARTITIONS_SCANNED,

    PARTITIONS_TOTAL,

    PARTITION_SCAN_PERCENTAGE,



    -- Memory and Spilling

    BYTES_SPILLED_TO_LOCAL_STORAGE,

    BYTES_SPILLED_TO_REMOTE_STORAGE,

    BYTES_SENT_OVER_THE_NETWORK,



    -- Credits and Cost

    CREDITS_USED_CLOUD_SERVICES,



    -- Data Transfer

    OUTBOUND_DATA_TRANSFER_CLOUD,

    OUTBOUND_DATA_TRANSFER_REGION,

    OUTBOUND_DATA_TRANSFER_BYTES,

    INBOUND_DATA_TRANSFER_CLOUD,

    INBOUND_DATA_TRANSFER_REGION,

    INBOUND_DATA_TRANSFER_BYTES,



    -- External Functions

    EXTERNAL_FUNCTION_TOTAL_INVOCATIONS,

    EXTERNAL_FUNCTION_TOTAL_SENT_ROWS,

    EXTERNAL_FUNCTION_TOTAL_RECEIVED_ROWS,

    EXTERNAL_FUNCTION_TOTAL_SENT_BYTES,

    EXTERNAL_FUNCTION_TOTAL_RECEIVED_BYTES,



    -- Query Acceleration

    QUERY_ACCELERATION_BYTES_SCANNED,

    QUERY_ACCELERATION_PARTITIONS_SCANNED,

    QUERY_ACCELERATION_UPPER_LIMIT_SCALE_FACTOR,



    -- Transaction Information

    TRANSACTION_ID,

    

    -- System Information

    RELEASE_VERSION,



    -- Performance Ratios and Calculated Fields

    COMPILATION_TIME_PERCENTAGE,

    EXECUTION_TIME_PERCENTAGE,

    ROWS_PER_MB_SCANNED,



    -- Performance Classifications

    PERFORMANCE_CATEGORY,

    CACHE_EFFICIENCY,

    SPILL_CLASSIFICATION,



    -- Analysis metadata

    CURRENT_TIMESTAMP as ANALYSIS_TIMESTAMP,

    CURRENT_DATE - 1 as ANALYSIS_DATE



FROM filtered_queries

ORDER BY START_TIME DESC;



Now, here is what you have to do. Create a Python Flask API; I will pass you the Snowflake cursor directly, so just take it, execute the queries to build these three tables, and store them locally in CSV, JSON, or whichever file format is fastest and best. Then use that as cached data. In React, based on this data, first create a warehouse table in which all the query-count columns are clickable. When one is clicked, navigate to a query page grouped by user, carrying the warehouse name and ID for that specific click. For example, if I click on the 1-10 seconds bucket, take only the query IDs for that bucket, look those queries up in the query history/summary table, group them by user, and show the table. So if the warehouse table shows 35 queries in the 1-10 seconds bucket and I click on it, you have to show all 35 of those queries organized by user, in a table. Then, when I click on a user, show all of that user's queries in that category. Finally, show a table of those queries with a button for detailed queries, and when it is clicked, show the full detailed query.
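
A minimal Flask sketch of that flow, under stated assumptions: the three tables above already exist in Snowflake, pandas plus pyarrow handle the local Parquet cache, and the route names and file paths are mine for illustration, not part of the prompt:

import pandas as pd
from flask import Flask, request

app = Flask(__name__)

TABLES = {
    "warehouses": "WAREHOUSE_ANALYTICS_DASHBOARD_with_queries",
    "summary": "QUERY_HISTORY_SUMMARY",
    "details": "QUERY_DETAILS_COMPLETE",
}

def refresh_cache(cursor):
    """Pull each table once through the supplied Snowflake cursor
    and cache the result locally as Parquet."""
    for name, table in TABLES.items():
        cursor.execute(f"SELECT * FROM {table}")
        cols = [c[0] for c in cursor.description]
        pd.DataFrame(cursor.fetchall(), columns=cols).to_parquet(f"{name}.parquet")

def as_json(df):
    # DataFrame.to_json copes with NaN and timestamps, which a plain
    # jsonify over to_dict() would not.
    return app.response_class(df.to_json(orient="records"),
                              mimetype="application/json")

@app.route("/api/warehouses")
def warehouses():
    # Backs the clickable warehouse table in React.
    return as_json(pd.read_parquet("warehouses.parquet"))

@app.route("/api/queries")
def bucket_queries():
    # React passes the clicked cell's query IDs, e.g. /api/queries?ids=a,b,c;
    # the page then groups the returned rows by USER_NAME.
    ids = request.args.get("ids", "").split(",")
    df = pd.read_parquet("summary.parquet")
    return as_json(df[df["QUERY_ID"].isin(ids)])

@app.route("/api/query/<query_id>")
def query_detail(query_id):
    # Full row from QUERY_DETAILS_COMPLETE for the "show details" button.
    df = pd.read_parquet("details.parquet")
    return as_json(df[df["QUERY_ID"] == query_id])

On the React side the flow would then be: the warehouse table calls /api/warehouses, a click on a bucket cell sends that cell's QUERY_IDS array to /api/queries, and the per-query detail button calls /api/query/<id>.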
