"Untitled Post" - Views: 2,268 · Hits: 2,268 - Type: Unlisted

/*
    Standard session SET options for schema-creation scripts. These match the
    settings SQL Server tooling emits when scripting objects; ANSI_NULLS and
    QUOTED_IDENTIFIER in particular are persisted with the stored procedures
    created later in this script.
*/
SET ANSI_NULLS ON
SET ANSI_NULL_DFLT_ON ON
SET ANSI_PADDING ON
SET ANSI_WARNINGS ON
SET ARITHABORT ON
SET CONCAT_NULL_YIELDS_NULL ON
SET NUMERIC_ROUNDABORT OFF
SET QUOTED_IDENTIFIER ON
GO

/*
    ======
    Query Hash Stats MDW Tables
    ======
*/
-- Guard: this script must run inside a Management Data Warehouse database,
-- which is identifiable by the Microsoft_DataCollector_MDW_Version extended
-- property. Severity 21 terminates the connection (WITH LOG is mandatory at
-- severity >= 20), so nothing below executes when the check fails.
IF NOT EXISTS (SELECT * FROM sys.extended_properties WHERE name = 'Microsoft_DataCollector_MDW_Version')
BEGIN
    RAISERROR ('ERROR: This script must be executed within a Management Data Warehouse database.  Please change the database context to your MDW database before running the script.', 21, 1) WITH NOWAIT, LOG;
END;


-- Informational progress message (severity 0 does not interrupt execution).
RAISERROR ('Configuring Query Hash Stats collector in MDW database...', 0, 1) WITH NOWAIT;


-- The table schema version this revision of the script deploys.
DECLARE @ExpectedTableSchemaVersion INT = 46;

-- Schema version recorded on the existing stats table, if any.
-- Stays NULL when the table or its extended property is absent
-- (fn_listextendedproperty then returns an empty set).
DECLARE @ExistingTableSchemaVersion INT;

SELECT @ExistingTableSchemaVersion = CONVERT (INT, value)
FROM fn_listextendedproperty ('QueryHashStatsSchemaVersion', 'SCHEMA', 'custom_snapshots', 'TABLE', 'query_hash_stats', NULL, NULL);

/*
    If the existing stats table was created by a different script revision
    (schema version mismatch, or no version tag at all), drop it so it can
    be recreated below with the expected schema.
*/
IF OBJECT_ID ('custom_snapshots.query_hash_stats', 'U') IS NOT NULL
    AND ISNULL (@ExistingTableSchemaVersion, -1) <> @ExpectedTableSchemaVersion
BEGIN
    RAISERROR ('Dropping table query_hash_stats (current schema version: %d, new schema version: %d)...', 
        0, 1, @ExistingTableSchemaVersion, @ExpectedTableSchemaVersion) WITH NOWAIT;
    DROP TABLE custom_snapshots.query_hash_stats;
END;

/*
    Get the schema version recorded on the text-and-plan table.
    Reset the variable first: if the extended property does not exist,
    the SELECT below returns no rows and assigns nothing, which would
    otherwise leave the variable holding the stale value read for
    query_hash_stats — making an out-of-date (or untagged) table look
    current and silently skipping the drop below.
*/
SET @ExistingTableSchemaVersion = NULL;
SELECT @ExistingTableSchemaVersion = CONVERT (int, value)
FROM fn_listextendedproperty ('QueryHashStatsSchemaVersion', 'SCHEMA', 'custom_snapshots', 'TABLE', 'query_hash_text_and_plan', NULL, NULL);

/* 
    Drop the text-and-plan table if it's out of date
*/ 
IF OBJECT_ID('custom_snapshots.query_hash_text_and_plan', 'U') IS NOT NULL
    AND (@ExpectedTableSchemaVersion != ISNULL (@ExistingTableSchemaVersion, -1))
BEGIN
    RAISERROR ('Dropping table query_hash_text_and_plan (current schema version: %d, new schema version: %d)...', 
        0, 1, @ExistingTableSchemaVersion, @ExpectedTableSchemaVersion) WITH NOWAIT;
    DROP TABLE custom_snapshots.query_hash_text_and_plan;
END;


/*
    Create the MDW stats table if it doesn't already exist.
    NOTE: any change to this table's definition must be accompanied by a
    bump of @ExpectedTableSchemaVersion so that existing installations
    drop and recreate the table with the new shape.
*/ 
IF OBJECT_ID ('custom_snapshots.query_hash_stats') IS NULL
BEGIN
    RAISERROR ('Creating table custom_snapshots.query_hash_stats...',0, 1) WITH NOWAIT;
    CREATE TABLE custom_snapshots.query_hash_stats(
        -- Query fingerprint columns. Both hashes are 8 bytes wide, matching
        -- the query_hash/query_plan_hash values exposed by
        -- sys.dm_exec_query_stats, the VARBINARY(8) key columns of the
        -- companion query_hash_text_and_plan table, and the BINARY(8)
        -- variables used by the reporting procs below. (query_plan_hash was
        -- previously declared BINARY(10), which zero-padded every value.)
        query_hash                          BINARY(8) NOT NULL,
        query_plan_hash                     BINARY(8) NOT NULL,

        -- Plan-cache state observed at collection time
        in_memory_plan_count                INT NULL,
        min_creation_time                   DATETIMEOFFSET(7) NULL,
        max_creation_time                   DATETIMEOFFSET(7) NULL,
        last_execution_time                 DATETIMEOFFSET(7) NULL,
        max_plan_generation_num             INT NULL,

        -- Execution counts: lifetime totals, in-progress work, per-interval
        -- deltas, and a per-minute rate
        total_completed_execution_count     BIGINT NULL,
        in_progress_execution_count         INT NULL,
        interval_completed_execution_count  BIGINT NULL,
        interval_execution_count            BIGINT NULL,
        executions_per_min                  BIGINT NULL,

        -- CPU time (milliseconds)
        total_completed_cpu_time_ms         BIGINT NULL,
        min_cpu_time_ms                     BIGINT NULL,
        max_cpu_time_ms                     BIGINT NULL,
        in_progress_cpu_time_ms             BIGINT NULL,
        interval_completed_cpu_time_ms      BIGINT NULL,
        interval_cpu_time_ms                BIGINT NULL,
        cpu_time_ms_per_sec                 BIGINT NULL,
        average_cpu_time_ms                 BIGINT NULL,

        -- Physical reads
        total_completed_physical_reads      BIGINT NULL,
        min_physical_reads                  BIGINT NULL,
        max_physical_reads                  BIGINT NULL,
        in_progress_physical_reads          BIGINT NULL,
        interval_completed_physical_reads   BIGINT NULL,
        interval_physical_reads             BIGINT NULL,
        physical_reads_per_sec              BIGINT NULL,
        average_physical_reads              BIGINT NULL,

        -- Logical reads
        total_completed_logical_reads       BIGINT NULL,
        min_logical_reads                   BIGINT NULL,
        max_logical_reads                   BIGINT NULL,
        in_progress_logical_reads           BIGINT NULL,
        interval_completed_logical_reads    BIGINT NULL,
        interval_logical_reads              BIGINT NULL,
        logical_reads_per_sec               BIGINT NULL,
        average_logical_reads               BIGINT NULL,

        -- Logical writes
        total_completed_logical_writes      BIGINT NULL,
        min_logical_writes                  BIGINT NULL,
        max_logical_writes                  BIGINT NULL,
        in_progress_logical_writes          BIGINT NULL,
        interval_completed_logical_writes   BIGINT NULL,
        interval_logical_writes             BIGINT NULL,
        logical_writes_per_sec              BIGINT NULL,
        average_logical_writes              BIGINT NULL,

        -- CLR time (milliseconds)
        total_completed_clr_time_ms         BIGINT NULL,
        min_clr_time_ms                     BIGINT NULL,
        max_clr_time_ms                     BIGINT NULL,
        in_progress_clr_time_ms             BIGINT NULL,
        interval_completed_clr_time_ms      BIGINT NULL,
        interval_clr_time_ms                BIGINT NULL,
        clr_time_ms_per_sec                 BIGINT NULL,
        average_clr_time_ms                 BIGINT NULL,

        -- Elapsed/duration time (milliseconds)
        total_completed_execution_time_ms   BIGINT NULL,
        min_execution_time_ms               BIGINT NULL,
        max_execution_time_ms               BIGINT NULL,
        in_progress_execution_time_ms       BIGINT NULL,
        interval_completed_execution_time_ms BIGINT NULL,
        interval_execution_time_ms          BIGINT NULL,
        execution_time_ms_per_sec           BIGINT NULL,
        average_execution_time_ms           BIGINT NULL,

        -- Per-snapshot rank of this query within each resource dimension
        interval_cpu_time_rank              SMALLINT NULL,
        max_cpu_time_rank                   SMALLINT NULL,
        interval_logical_reads_rank         SMALLINT NULL,
        max_logical_reads_rank              SMALLINT NULL,
        interval_physical_reads_rank        SMALLINT NULL,
        max_physical_reads_rank             SMALLINT NULL,
        interval_logical_writes_rank        SMALLINT NULL,
        max_logical_writes_rank             SMALLINT NULL,
        plan_count_rank                     SMALLINT NULL,
        execution_count_rank                SMALLINT NULL,
        average_execution_time_rank         SMALLINT NULL,
        interval_execution_time_rank        SMALLINT NULL,
        max_execution_time_rank             SMALLINT NULL,
        interval_clr_time_rank              SMALLINT NULL,
        max_clr_time_rank                   SMALLINT NULL,
        max_plan_generation_rank            SMALLINT NULL, 

        -- Upload bookkeeping: when the row was collected and which MDW
        -- snapshot delivered it
        collection_time                     DATETIMEOFFSET(7) NOT NULL,
        snapshot_id                         INT NOT NULL
    );

    ALTER TABLE custom_snapshots.query_hash_stats ADD CONSTRAINT pk_query_hash_stats 
    PRIMARY KEY NONCLUSTERED (snapshot_id, collection_time, query_hash, query_plan_hash);

    -- Create this FK with ON DELETE CASCADE so that the MDW purge job will clean up old 
    -- records according to the collection set's data retention period. 
    ALTER TABLE custom_snapshots.query_hash_stats ADD CONSTRAINT fk_query_hash_stats_snapshots_internal 
    FOREIGN KEY(snapshot_id) REFERENCES core.snapshots_internal (snapshot_id)
    ON DELETE CASCADE;

    /* 
        Tag the table with a schema version.  We'll use this to determine 
        whether we need to drop the existing table because a more up-to-date 
        schema version exists. 
    */
    EXEC sp_addextendedproperty 
        @name = 'QueryHashStatsSchemaVersion', 
        @value = @ExpectedTableSchemaVersion, 
        @level0type = 'SCHEMA', 
        @level0name = 'custom_snapshots', 
        @level1type = 'TABLE', 
        @level1name = 'query_hash_stats';
END;


/*
    Create the query_hash_text_and_plan lookup table: one representative
    query text and XML showplan per plan fingerprint recorded in
    query_hash_stats. Harvesting the text/plan is best-effort — the server
    may evict a cached plan at any moment, so a subset of the fingerprints
    referenced by query_hash_stats may never get a representative row here.
*/
IF OBJECT_ID ('custom_snapshots.query_hash_text_and_plan') IS NULL
BEGIN
    RAISERROR ('Creating table custom_snapshots.query_hash_text_and_plan...',0, 1) WITH NOWAIT;
    CREATE TABLE custom_snapshots.query_hash_text_and_plan (
        query_plan_hash                 VARBINARY(8) NOT NULL,
        query_hash                      VARBINARY(8) NOT NULL,
        first_collection_time           DATETIMEOFFSET(7) NULL,
        sample_plan_handle              BINARY(64) NULL,
        sample_plan_generation_number   INT NULL,
        sample_creation_time            DATETIMEOFFSET(7) NULL,
        sample_query_plan               NVARCHAR(MAX) NULL,
        sample_sql_handle               BINARY(64) NULL,
        sample_statement_start_offset   INT NULL,
        sample_statement_end_offset     INT NULL,
        sample_database_name            NVARCHAR(128) NULL,
        sample_object_name              NVARCHAR(270) NULL,
        sample_statement_text           NVARCHAR(MAX) NULL,
        source_id                       INT NULL, 
        snapshot_id                     INT NULL
    );

    -- The collection set can legitimately send the same plan fingerprint
    -- more than once. IGNORE_DUP_KEY makes the primary key silently discard
    -- such duplicates instead of failing the whole upload.
    ALTER TABLE custom_snapshots.query_hash_text_and_plan ADD CONSTRAINT pk_query_hash_text_and_plan 
    PRIMARY KEY CLUSTERED (query_hash, query_plan_hash) 
        WITH (IGNORE_DUP_KEY = ON);

    -- Supports efficient filtering and grouping on database/object name.
    CREATE NONCLUSTERED INDEX idx_query_hash_text_and_plan_database_name 
        ON custom_snapshots.query_hash_text_and_plan (sample_database_name, sample_object_name);

    /*
        Deliberately NO foreign key to core.snapshots_internal here: this is
        a lookup table and must survive the scheduled MDW data purge, which
        would otherwise remove rows per the collection set's retention
        period. Under normal conditions its size stabilizes once a
        representative plan exists for each interesting fingerprint —
        typically in the low 1000s of rows per SQL instance.
    */

    /*
        Tag the table with its schema version so a future revision of this
        script can detect and drop an out-of-date copy.
    */
    EXEC sp_addextendedproperty 
        @name = 'QueryHashStatsSchemaVersion', 
        @value = @ExpectedTableSchemaVersion, 
        @level0type = 'SCHEMA', 
        @level0name = 'custom_snapshots', 
        @level1type = 'TABLE', 
        @level1name = 'query_hash_text_and_plan';
END;

/*
    Earlier script versions created a foreign key (without ON DELETE
    CASCADE) from the plan/text table to the MDW snapshots table; it could
    block the scheduled MDW data purge. Drop it if it is still present.
*/
IF EXISTS (
    SELECT 1 
    FROM sys.foreign_keys AS fk 
    WHERE fk.parent_object_id = OBJECT_ID('custom_snapshots.query_hash_text_and_plan') 
        AND fk.name = 'fk_query_hash_text_and_plan_snapshots_internal')
BEGIN
    ALTER TABLE custom_snapshots.query_hash_text_and_plan 
    DROP CONSTRAINT fk_query_hash_text_and_plan_snapshots_internal;
END;


/*
    Create the dbo.custom_mdw_overview_reports table used by the
    MdwOverviewCustom.rdl report. Each row maps a top-level custom report to
    the collection set that feeds it, so the overview report can list every
    available report (and the SQL instances with supporting data in this
    MDW) and display the last upload time for the data behind each one.
*/
IF OBJECT_ID ('dbo.custom_mdw_overview_reports') IS NULL
BEGIN
    RAISERROR ('Creating table dbo.custom_mdw_overview_reports...',0, 1) WITH NOWAIT;

    CREATE TABLE dbo.custom_mdw_overview_reports (
        top_level_report_file_name          NVARCHAR(400) NOT NULL, 
        top_level_report_display_name       NVARCHAR(1024) NOT NULL, 
        collection_set_uid                  UNIQUEIDENTIFIER NOT NULL, 
        max_expected_upload_delay_minutes   INT
    );

    -- Readers may only query the table; the admin role gets full DML rights.
    GRANT SELECT ON dbo.custom_mdw_overview_reports TO mdw_reader; 
    GRANT SELECT, INSERT, UPDATE, DELETE ON dbo.custom_mdw_overview_reports TO mdw_admin; 

    ALTER TABLE dbo.custom_mdw_overview_reports ADD CONSTRAINT pk_custom_mdw_overview_reports 
    PRIMARY KEY CLUSTERED (top_level_report_file_name)
        WITH (IGNORE_DUP_KEY = ON);
END;

-- "Register" the top-level query stats report in this table
-- The GUID below is the identifier for the Query Hash Stats collection set
RAISERROR ('Registering the Query Hash Statistics top-level custom report...',0, 1) WITH NOWAIT;
DELETE FROM dbo.custom_mdw_overview_reports WHERE top_level_report_display_name = 'Query Hash Statistics';
INSERT INTO dbo.custom_mdw_overview_reports (top_level_report_file_name, top_level_report_display_name, 
    collection_set_uid, max_expected_upload_delay_minutes)
VALUES ('QueryHashStatsReportsTopN', 'Query Hash Statistics', '4262A02C-B669-4F50-ADF3-2857D590F125', 120); 

-- We want the overview report to act as a launch-point for the built-in system collection set reports, too
RAISERROR ('Registering the Server Activity top-level custom report...',0, 1) WITH NOWAIT;
DELETE FROM dbo.custom_mdw_overview_reports WHERE top_level_report_display_name = 'Server Activity';
INSERT INTO dbo.custom_mdw_overview_reports (top_level_report_file_name, top_level_report_display_name, 
    collection_set_uid, max_expected_upload_delay_minutes)
VALUES ('0E456729-BC31-47D5-9118-E7B9D2C9B25D', 'Server Activity', '49268954-4FD4-4EB6-AA04-CD59D9BB5714', 120); 

-- Skip query stats; query hash stats should be a more effective query analysis tool
DELETE FROM dbo.custom_mdw_overview_reports WHERE top_level_report_display_name = 'Query Statistics';
--INSERT INTO dbo.custom_mdw_overview_reports (top_level_report_file_name, top_level_report_display_name, 
--    collection_set_uid, max_expected_upload_delay_minutes)
--VALUES ('848841FA-069F-4058-A20C-B42E4C2C34D7', 'Query Statistics', '2DC02BD6-E230-4C05-8516-4E8C0EF21F95', 120); 

--DELETE FROM dbo.custom_mdw_overview_reports WHERE top_level_report_display_name = 'Disk Usage';
--INSERT INTO dbo.custom_mdw_overview_reports (top_level_report_file_name, top_level_report_display_name, 
--    collection_set_uid, max_expected_upload_delay_minutes)
--VALUES ('A4E3A3D2-CE44-4ED2-8F8C-4C0E471273AF', 'Disk Usage', '7B191952-8ECF-4E12-AEB2-EF646EF79FEF', 1200); 

--DELETE FROM dbo.custom_mdw_overview_reports WHERE top_level_report_display_name = 'Latch Statistics';
--INSERT INTO dbo.custom_mdw_overview_reports (top_level_report_file_name, top_level_report_display_name, 
--    collection_set_uid, max_expected_upload_delay_minutes)
--VALUES ('MdwReportSample', 'Latch Statistics', '49268954-4FD4-4EB6-AA04-CD59D9BB5714', 120);



-- Register our custom collector type with this MDW database so it is allowed to upload
RAISERROR ('Registering custom collector type in MDW...',0, 1) WITH NOWAIT;
EXEC core.sp_add_collector_type @collector_type_uid = '8657F26A-FF77-4F54-AB49-781D94B655E1'

Done_Label:
GO



/*
    ======
    Query Hash Stats Reporting Stored Procedures
    ======
*/

-- Drop any existing copy of the top-N reporting procedure before recreating it.
IF OBJECT_ID ('custom_snapshots.rpt_top_query_hash_stats') IS NOT NULL
BEGIN
    RAISERROR ('Dropping procedure custom_snapshots.rpt_top_query_hash_stats...', 0, 1) WITH NOWAIT;
    DROP PROCEDURE custom_snapshots.rpt_top_query_hash_stats;
END;
GO
RAISERROR ('Creating procedure custom_snapshots.rpt_top_query_hash_stats...', 0, 1) WITH NOWAIT;
GO
/*
    custom_snapshots.rpt_top_query_hash_stats
      Returns the top N most expensive queries (grouped by query hash)
      observed in the requested time window, ranked by the selected cost
      metric, with a representative SQL text and plan for each.
    Parameters:
      @instance_name     - SQL Server instance name
      @start_time        - (Optional) window start (UTC); derived from
                           @time_window_size/@time_interval_min, else the
                           earliest snapshot, when NULL
      @end_time          - (Optional) window end (UTC); latest snapshot when NULL
      @num_queries       - (Optional) number of queries to return (default 10)
      @time_window_size  - (Optional) number of intervals in the time window
      @time_interval_min - (Optional) minutes per interval (default 1)
      @order_by_criteria - (Optional) 'CPU' (default), 'Duration',
                           'Execution Rate', 'I/O', 'Logical Reads',
                           'Physical Reads', or 'Logical Writes'
      @database_name     - (Optional) restrict results to one database
      @query_hash_str    - (Optional) hex string form of a query hash; when
                           supplied, stats are broken out per query plan
                           instead of rolled up per query
*/
CREATE PROCEDURE custom_snapshots.rpt_top_query_hash_stats
    @instance_name      SYSNAME,
    @start_time         DATETIME = NULL,
    @end_time           DATETIME = NULL,
    @num_queries        INT = NULL, 
    @time_window_size   SMALLINT = NULL,
    @time_interval_min  SMALLINT = 1, 
    @order_by_criteria  VARCHAR(30) = 'CPU', 
    @database_name      NVARCHAR(255) = NULL, 
    -- BINARY values passed in hex string form so the proc can be called from Reporting Services, 
    -- which doesn't handle binary parameters well. 
    @query_hash_str     VARCHAR(32) = NULL
AS
BEGIN
    SET NOCOUNT ON;

    -- Clean params 
    DECLARE @topn INT;
    SET @topn = ISNULL (@num_queries, 10); 
    IF @database_name = '' SET @database_name = NULL;

    -- @end_time should never be NULL when we are called from the QueryHashStatsTopN report
    -- Convert snapshot_time (DATETIMEOFFSET) to a UTC DATETIME
    IF (@end_time IS NULL)
    BEGIN
        SET @end_time = CONVERT (DATETIME, SWITCHOFFSET (CAST ((SELECT MAX(snapshot_time) FROM core.snapshots) AS DATETIMEOFFSET(7)), '+00:00'));
    END;

    IF (@start_time IS NULL)
    BEGIN
        -- If time_window_size and time_interval_min are set use them
        -- to determine the start time
        -- Otherwise use the earliest available snapshot_time
        IF @time_window_size IS NOT NULL AND @time_interval_min IS NOT NULL
        BEGIN
            SET @start_time = DATEADD(minute, @time_window_size * @time_interval_min * -1.0, @end_time);
        END
        ELSE
        BEGIN
            -- Convert min snapshot_time (DATETIMEOFFSET) to a UTC DATETIME
            SET @start_time = CONVERT (DATETIME, SWITCHOFFSET (CAST ((SELECT MIN(snapshot_time) FROM core.snapshots) AS DATETIMEOFFSET(7)), '+00:00'));
        END;
    END;

    -- Map the time window onto the snapshot ID range so the main query can 
    -- filter on snapshot_time_id. 
    DECLARE @end_snapshot_time_id INT;
    SELECT @end_snapshot_time_id = MAX(snapshot_time_id) FROM core.snapshots WHERE snapshot_time <= @end_time;

    DECLARE @start_snapshot_time_id INT;
    SELECT @start_snapshot_time_id = MIN(snapshot_time_id) FROM core.snapshots WHERE snapshot_time >= @start_time;

    -- Window length in seconds; used as the denominator for all rates. 
    DECLARE @interval_sec INT;
    SET @interval_sec = DATEDIFF (s, @start_time, @end_time);
    IF @interval_sec = 0 
    BEGIN
        -- Round up to prevent divide-by-zero
        SET @interval_sec = 1;
    END;


    SELECT 
        -- Convert VARBINARY to VARCHAR for RS
        master.dbo.fn_varbintohexstr (grouped_stats_with_charted_value.query_hash) AS query_hash_str, 
        master.dbo.fn_varbintohexstr (grouped_stats_with_charted_value.query_plan_hash) AS query_plan_hash_str, 
        grouped_stats_with_charted_value.*, 
        ROW_NUMBER() OVER (ORDER BY charted_value DESC) as query_rank,  
        CONVERT (DATETIME, SWITCHOFFSET (grouped_stats_with_charted_value.min_creation_time, '+00:00')) AS min_creation_time_str,

        sql.sample_database_name, 
        sql.sample_object_name, 
        -- Flatten the first 100 chars of the statement into a single line 
        -- (tabs/newlines/runs of spaces collapsed) for grid display in RS. 
        REPLACE (REPLACE (REPLACE (REPLACE (REPLACE (REPLACE (
            LEFT (LTRIM (sql.sample_statement_text), 100)
            , CHAR(9), ' '), CHAR(10), ' '), CHAR(13), ' '), '   ', ' '), '  ', ' '), '  ', ' ') AS sample_statement_text_flat, 
        sql.sample_statement_text, 
        sql.first_collection_time, 
        -- Convert VARBINARY to VARCHAR for RS
        master.dbo.fn_varbintohexstr (sql.sample_sql_handle) AS sample_sql_handle_str, 
        sql.sample_statement_start_offset, 
        sql.sample_statement_end_offset, 
        -- Convert VARBINARY to VARCHAR for RS
        master.dbo.fn_varbintohexstr (sql.sample_plan_handle) AS sample_plan_handle_str, 
        sql.sample_creation_time, 
        sql.sample_plan_generation_number
    FROM 
    (
        -- This query adds the charted_value column so that we can order the set properly to return the TOP N queries
        SELECT TOP (@topn)
            CASE 
                -- Duration is expressed as a cumulative total (seconds)
                WHEN @order_by_criteria LIKE 'Duration' THEN grouped_stats.total_execution_time_ms
                -- For execution count we use a per-minute rate (executions/min)
                WHEN @order_by_criteria LIKE 'Execution Rate' THEN grouped_stats.executions_per_min 
                -- All other cumulative measures are expressed as a per-second average resource consumption rate (ms/sec)
                WHEN @order_by_criteria LIKE 'CPU' THEN grouped_stats.cpu_time_ms_per_sec 
                WHEN @order_by_criteria LIKE 'I/O' THEN grouped_stats.physical_reads_per_sec + grouped_stats.logical_writes_per_sec 
                WHEN @order_by_criteria LIKE 'Logical Reads' THEN grouped_stats.logical_reads_per_sec 
                WHEN @order_by_criteria LIKE 'Physical Reads' THEN grouped_stats.physical_reads_per_sec 
                WHEN @order_by_criteria LIKE 'Logical Writes' THEN grouped_stats.logical_writes_per_sec 
                ELSE grouped_stats.cpu_time_ms_per_sec 
            END AS charted_value, 
            
            grouped_stats.* 
        FROM 
        (
            -- This query calculates the total resources consumed by each query within the selected time window
            SELECT 
                stats.query_hash, 
                MAX (stats.query_plan_hash) AS query_plan_hash, 

                MAX (in_memory_plan_count) AS max_in_memory_plan_count, 
                MIN (min_creation_time) AS min_creation_time, 
                MAX (max_creation_time) AS max_creation_time, 
                MAX (last_execution_time) AS last_execution_time, 
                MAX (max_plan_generation_num) AS max_plan_generation_num, 
                COUNT (DISTINCT stats.query_plan_hash) AS query_plan_hash_count, 

                SUM (interval_execution_count) AS total_execution_count, 
                SUM (interval_completed_execution_count) AS total_completed_execution_count, 
                SUM (interval_execution_count) * 60.0 / @interval_sec AS executions_per_min, 
                MAX (in_progress_execution_count) AS max_in_progress_execution_count, 

                SUM (interval_cpu_time_ms) AS total_cpu_time_ms, 
                SUM (interval_completed_cpu_time_ms) AS total_completed_cpu_time_ms, 
                SUM (interval_cpu_time_ms) * 1.0 / @interval_sec AS cpu_time_ms_per_sec, 
                MIN (min_cpu_time_ms) AS min_cpu_time_ms, 
                MAX (max_cpu_time_ms) AS max_cpu_time_ms, 
                CASE 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_cpu_time_ms) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_cpu_time_ms, 

                SUM (interval_physical_reads) AS total_physical_reads, 
                SUM (interval_completed_physical_reads) AS total_completed_physical_reads, 
                SUM (interval_physical_reads) * 1.0 / @interval_sec AS physical_reads_per_sec, 
                MIN (min_physical_reads) AS min_physical_reads, 
                MAX (max_physical_reads) AS max_physical_reads, 
                CASE 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_physical_reads) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_physical_reads, 

                SUM (interval_logical_reads) AS total_logical_reads, 
                SUM (interval_completed_logical_reads) AS total_completed_logical_reads, 
                SUM (interval_logical_reads) * 1.0 / @interval_sec AS logical_reads_per_sec, 
                MIN (min_logical_reads) AS min_logical_reads, 
                MAX (max_logical_reads) AS max_logical_reads, 
                CASE 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_logical_reads) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_logical_reads, 

                SUM (interval_logical_writes) AS total_logical_writes, 
                SUM (interval_completed_logical_writes) AS total_completed_logical_writes, 
                SUM (interval_logical_writes) * 1.0 / @interval_sec AS logical_writes_per_sec, 
                MIN (min_logical_writes) AS min_logical_writes, 
                MAX (max_logical_writes) AS max_logical_writes, 
                CASE 
                    -- NULL (not 0) when there are no completed executions, 
                    -- consistent with the other average_* measures 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_logical_writes) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_logical_writes, 

                SUM (interval_clr_time_ms) AS total_clr_time_ms, 
                SUM (interval_completed_clr_time_ms) AS total_completed_clr_time_ms, 
                -- * 1.0 forces decimal division, matching the other *_per_sec 
                -- rates (was integer division, which truncated the rate) 
                SUM (interval_clr_time_ms) * 1.0 / @interval_sec AS clr_time_ms_per_sec, 
                MIN (min_clr_time_ms) AS min_clr_time_ms, 
                MAX (max_clr_time_ms) AS max_clr_time_ms, 
                CASE 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_clr_time_ms) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_clr_time_ms, 

                SUM (interval_execution_time_ms) AS total_execution_time_ms, 
                SUM (interval_completed_execution_time_ms) AS total_completed_execution_time_ms, 
                SUM (interval_execution_time_ms) * 1.0 / @interval_sec AS execution_time_ms_per_sec, 
                MIN (min_execution_time_ms) AS min_execution_time_ms, 
                MAX (max_execution_time_ms) AS max_execution_time_ms, 
                CASE 
                    WHEN SUM (total_completed_execution_count) = 0 THEN NULL 
                    ELSE SUM (total_completed_execution_time_ms) * 1.0 / SUM (total_completed_execution_count) 
                END AS average_execution_time_ms
            FROM custom_snapshots.query_hash_stats AS stats
            INNER JOIN core.snapshots snap ON stats.snapshot_id = snap.snapshot_id
            LEFT OUTER JOIN custom_snapshots.query_hash_text_and_plan AS sql ON stats.query_hash = sql.query_hash AND stats.query_plan_hash = sql.query_plan_hash 
            WHERE
                snap.instance_name = @instance_name 
                AND snap.snapshot_time_id BETWEEN @start_snapshot_time_id AND @end_snapshot_time_id
                AND stats.collection_time BETWEEN @start_time AND @end_time 
                AND ISNULL (sql.sample_database_name, '') LIKE ISNULL (@database_name, '%')
                AND (@query_hash_str IS NULL OR stats.query_hash = snapshots.fn_hexstrtovarbin(@query_hash_str))
            GROUP BY 
                stats.query_hash, 
                -- If we're not zoomed in to view the plans for a particular query, we want to roll the stats 
                -- up at the query level, but not down to the query plan level. The CASE below will only group 
                -- by plan hash if we have been asked to filter on a particular query_hash value. 
                CASE
                    WHEN @query_hash_str IS NULL THEN 1
                    ELSE stats.query_plan_hash 
                END
        ) AS grouped_stats
        ORDER BY charted_value DESC -- Sort by the calculated charted_value column
    ) AS grouped_stats_with_charted_value
    LEFT OUTER JOIN custom_snapshots.query_hash_text_and_plan AS sql 
        ON grouped_stats_with_charted_value.query_hash = sql.query_hash AND grouped_stats_with_charted_value.query_plan_hash = sql.query_plan_hash 
    ORDER BY query_rank ASC;
END;
GO



--
-- custom_snapshots.rpt_query_plan_hash_stats_timeline
--  Returns stats for the top 10 plans observed for a query within a specified time interval. 
--  Output is intended for plotting this over a time window. 
-- Parameters: 
--    @instance_name - SQL Server instance name
--    @start_time - (Optional) time window start (UTC)
--    @end_time - time window end (UTC)
--    @num_query_plans - (Optional) maximum number of plans to return
--    @time_window_size - Number of intervals in the time window (provide if @start_time is NULL)
--    @time_interval_min - Number of minutes in each interval (provide if @start_time is NULL)
--    @order_by_criteria - (Optional) 'CPU' (default), 'Duration', 'I/O' (reads+writes), 'Logical Reads', 'Physical Reads', or 'Logical Writes'
--    @query_hash_str - String representation of the query hash (e.g. "0x1F27BC...")
--    @query_plan_hash_str - (Optional) String representation of a query plan hash (e.g. "0x1F27BC..."). Omit to see stats for all plans
--
-- Drop any existing copy of the timeline procedure before recreating it.
IF OBJECT_ID (N'custom_snapshots.rpt_query_plan_hash_stats_timeline', 'P') IS NOT NULL
BEGIN
    PRINT 'Dropping procedure custom_snapshots.rpt_query_plan_hash_stats_timeline ...';
    DROP PROCEDURE custom_snapshots.rpt_query_plan_hash_stats_timeline;
END;
GO 

PRINT 'Creating procedure custom_snapshots.rpt_query_plan_hash_stats_timeline ...';
GO
CREATE PROCEDURE custom_snapshots.rpt_query_plan_hash_stats_timeline
    @instance_name          SYSNAME,
    @start_time             DATETIME = NULL,
    -- NOTE(review): the CREATE PROCEDURE header and the leading parameters
    -- (presumably @instance_name and @start_time, both referenced below) are
    -- defined above this chunk; the lines below are the tail of that list.
    @end_time               DATETIME = NULL,
    @num_query_plans        INT = 1000, 
    @time_window_size       SMALLINT = NULL,
    @time_interval_min      SMALLINT = 1, 
    @order_by_criteria      VARCHAR(30) = 'CPU', 
    -- BINARY values passed in hex string form so the proc can be called from RS 
    @query_hash_str         VARCHAR(32), 
    @query_plan_hash_str    VARCHAR(32) = NULL
AS
BEGIN
    SET NOCOUNT ON;

    -- @end_time should never be NULL when we are called from the Query Hash Stats report
    -- Convert time (DATETIMEOFFSET) to a UTC DATETIME
    -- Default the window end to the most recent snapshot, normalized to UTC.
    IF (@end_time IS NULL)
    BEGIN
        SET @end_time = CONVERT (DATETIME, SWITCHOFFSET (CAST ((SELECT MAX(snapshot_time) FROM core.snapshots) AS DATETIMEOFFSET(7)), '+00:00'));
    END;

    IF (@start_time IS NULL)
    BEGIN
        -- If time_window_size and time_interval_min are set use them to determine the start time
        -- Otherwise use the earliest available snapshot_time
        IF @time_window_size IS NOT NULL AND @time_interval_min IS NOT NULL
        BEGIN
            -- Window length = @time_window_size intervals of @time_interval_min minutes,
            -- counted back from @end_time.
            -- NOTE(review): "* -1.0" makes the second DATEADD argument a float, which is
            -- implicitly converted back to int; "* -1" would express the same intent directly.
            SET @start_time = DATEADD(minute, @time_window_size * @time_interval_min * -1.0, @end_time);
        END
        ELSE
        BEGIN
            -- Convert min snapshot_time (DATETIMEOFFSET) to a UTC DATETIME
            SET @start_time = CONVERT (DATETIME, SWITCHOFFSET (CAST ((SELECT MIN(snapshot_time) FROM core.snapshots) AS DATETIMEOFFSET(7)), '+00:00'));
        END;
    END;

    -- Map the wall-clock [@start_time, @end_time] window onto the corresponding
    -- range of snapshot_time_id values so later queries can filter on the
    -- (presumably indexed) integer id column as well as collection_time.
    DECLARE @end_snapshot_time_id INT;
    SELECT @end_snapshot_time_id = MAX(snapshot_time_id) FROM core.snapshots WHERE snapshot_time <= @end_time;

    DECLARE @start_snapshot_time_id INT;
    SELECT @start_snapshot_time_id = MIN(snapshot_time_id) FROM core.snapshots WHERE snapshot_time >= @start_time;

    -- SQL and plan handles are passed in as a hex-formatted string b/c of an RS limitation. Convert to VARBINARY. 
    DECLARE @query_hash BINARY(8);
    DECLARE @query_plan_hash BINARY(8);
    SET @query_hash = snapshots.fn_hexstrtovarbin (@query_hash_str);
    
    -- @query_plan_hash stays NULL when no plan-hash filter was requested; the
    -- "@query_plan_hash IS NULL OR ..." predicate below then matches all plans.
    IF @query_plan_hash_str IS NOT NULL
    BEGIN
        SET @query_plan_hash = snapshots.fn_hexstrtovarbin (@query_plan_hash_str);
    END;
    
    -- Get the most expensive plan fingerprints, and assign a plan number to each of them
    -- NOTE(review): both TOP (@num_query_plans) and ROW_NUMBER() order by
    -- total_charted_value ASCENDING, so this actually keeps the @num_query_plans
    -- *cheapest* plans and plan_number 1 is the least expensive — which contradicts
    -- the "most expensive" comment above.  Confirm whether DESC was intended here.
    SELECT TOP (@num_query_plans) 
        *, 
        ROW_NUMBER () OVER (ORDER BY total_plan_stats.total_charted_value) AS plan_number
    INTO #top_plans 
    FROM
    (
        -- Aggregate the cost metric selected by @order_by_criteria per
        -- (query_hash, query_plan_hash) pair within the report window.
        -- Unrecognized criteria fall through to CPU time (the ELSE branch).
        SELECT 
            qhs.query_hash, 
            qhs.query_plan_hash, 
            master.dbo.fn_varbintohexstr (qhs.query_hash) AS query_hash_str, 
            master.dbo.fn_varbintohexstr (qhs.query_plan_hash) AS query_plan_hash_str, 
            CASE 
                WHEN @order_by_criteria LIKE 'CPU' THEN SUM (qhs.interval_cpu_time_ms)
                WHEN @order_by_criteria LIKE 'Duration' THEN SUM (qhs.interval_execution_time_ms)
                WHEN @order_by_criteria LIKE 'I/O' THEN SUM (qhs.interval_logical_writes) + SUM (qhs.interval_physical_reads)
                WHEN @order_by_criteria LIKE 'Logical Reads' THEN SUM (qhs.interval_logical_reads)
                WHEN @order_by_criteria LIKE 'Physical Reads' THEN SUM (qhs.interval_physical_reads)
                WHEN @order_by_criteria LIKE 'Logical Writes' THEN SUM (qhs.interval_logical_writes)
                ELSE SUM (qhs.interval_cpu_time_ms)
            END AS total_charted_value
        FROM custom_snapshots.query_hash_stats AS qhs
        INNER JOIN core.snapshots AS snap ON qhs.snapshot_id = snap.snapshot_id 
        WHERE
            qhs.query_hash = @query_hash 
            AND snap.instance_name = @instance_name 
            AND snap.snapshot_time_id BETWEEN @start_snapshot_time_id AND @end_snapshot_time_id
            AND qhs.collection_time BETWEEN @start_time AND @end_time
        GROUP BY qhs.query_hash, qhs.query_plan_hash 
    ) AS total_plan_stats
    ORDER BY total_plan_stats.total_charted_value;

    -- Final result set: one row per (plan, collection_time_chart) point, joined to
    -- the sample text/plan captured for each plan fingerprint.
    SELECT TOP (@num_query_plans)
        -- NULL charted_values are possible.  e.g. We don't have an avg duration yet for a long-running 
        -- in-progress query because the plan hasn't shown up in dm_exec_query_stats yet.  But user can 
        -- still choose to chart average duration.  Use ISNULL to convert these to 0 because Reporting 
        -- Services will fail rendering with an "invalid parameter" error if any NULL values show up in 
        -- a chart's data series. 
        -- TODO: Removed fix (ISNULL(..., 0) replaced with ISNULL (..., NULL).  This caused seesaw chart 
        -- patterns for queries that are executed less frequently than once/5 min.  Need to look for a 
        -- different solution.  (On the upside, RS does allow NULLs in chart data series... maybe at 
        -- least a single non-NULL value is required?)
        -- NOTE(review): per the TODO above, ISNULL (..., NULL) is a deliberate no-op placeholder,
        -- not a mistake — leave it until the charting issue is resolved.
        ISNULL (CASE 
            WHEN @order_by_criteria LIKE 'CPU' THEN query_plan_stats.average_cpu_time_ms 
            WHEN @order_by_criteria LIKE 'Duration' THEN query_plan_stats.average_execution_time_ms 
            WHEN @order_by_criteria LIKE 'I/O' THEN query_plan_stats.average_physical_reads + query_plan_stats.average_logical_writes 
            WHEN @order_by_criteria LIKE 'Logical Reads' THEN query_plan_stats.average_logical_reads 
            WHEN @order_by_criteria LIKE 'Physical Reads' THEN query_plan_stats.average_physical_reads 
            WHEN @order_by_criteria LIKE 'Logical Writes' THEN query_plan_stats.average_logical_writes 
            ELSE query_plan_stats.average_cpu_time_ms
        END, NULL) AS charted_value, 
        query_plan_stats.*, 
        plans.first_collection_time, 
        plans.sample_creation_time, 
        plans.sample_database_name, 
        plans.sample_object_name, 
        plans.sample_plan_generation_number, 
        plans.sample_plan_handle, 
        plans.sample_query_plan, 
        plans.sample_sql_handle, 
        plans.sample_statement_start_offset, 
        plans.sample_statement_end_offset, 
        plans.sample_statement_text 
    FROM
    (
        SELECT 
            #top_plans.plan_number, 
            qhs.*
        FROM 
        (
            -- Work around a RS chart limitation (single data points do not plot on line charts). 
            -- Fake a second data point shortly after the first so even short-lived plans will 
            -- get plotted. 
            -- (UNION ALL doubles every row: once at the real collection_time, once 15 s later.)
            SELECT 
                *, 
                CONVERT (DATETIME, SWITCHOFFSET (collection_time, '+00:00')) AS collection_time_chart 
            FROM custom_snapshots.query_hash_stats 
            UNION ALL
            SELECT 
                *, 
                CONVERT (DATETIME, SWITCHOFFSET (DATEADD (second, 15, collection_time), '+00:00')) AS collection_time_chart 
            FROM custom_snapshots.query_hash_stats 
        ) AS qhs
        INNER JOIN core.snapshots AS snap ON qhs.snapshot_id = snap.snapshot_id 
        -- Restrict to plans chosen in #top_plans.  NOTE(review): #top_plans was already
        -- filtered to @query_hash, so the "#top_plans.query_hash = @query_hash" term is
        -- presumably redundant but harmless — verify before simplifying.
        INNER JOIN #top_plans ON #top_plans.query_hash = @query_hash AND #top_plans.query_plan_hash = qhs.query_plan_hash 
        WHERE
            qhs.query_hash = @query_hash 
            AND (@query_plan_hash IS NULL OR @query_plan_hash = qhs.query_plan_hash)
            AND snap.instance_name = @instance_name 
            AND snap.snapshot_time_id BETWEEN @start_snapshot_time_id AND @end_snapshot_time_id
            AND qhs.collection_time BETWEEN @start_time AND @end_time
    ) AS query_plan_stats
    -- LEFT join: stats rows are kept even if no sample text/plan has been captured yet.
    LEFT OUTER JOIN custom_snapshots.query_hash_text_and_plan AS plans ON plans.query_hash = @query_hash AND plans.query_plan_hash = query_plan_stats.query_plan_hash 
    ORDER BY query_plan_stats.plan_number, query_plan_stats.collection_time_chart;
END;
GO


-- TODO: add well-known queries to ignore/suppress from reports.  e.g. from profiler: "select * from OpenRowset(TABLE TRCDATA, @traceid, @records)"