diff --git a/config/config.go b/config/config.go index 2adb437..2d71817 100644 --- a/config/config.go +++ b/config/config.go @@ -10,7 +10,7 @@ import ( ) const ( - ReleemAgentVersion = "1.18.1" + ReleemAgentVersion = "1.19.0" ) var ( @@ -18,27 +18,28 @@ var ( ) type Config struct { - Debug bool `hcl:"debug"` - Env string `hcl:"env"` - Hostname string `hcl:"hostname"` - ApiKey string `hcl:"apikey"` - TimePeriodSeconds time.Duration `hcl:"interval_seconds"` - ReadConfigSeconds time.Duration `hcl:"interval_read_config_seconds"` - GenerateConfigSeconds time.Duration `hcl:"interval_generate_config_seconds"` - MysqlPassword string `hcl:"mysql_password"` - MysqlUser string `hcl:"mysql_user"` - MysqlHost string `hcl:"mysql_host"` - MysqlPort string `hcl:"mysql_port"` - MysqlSslMode bool `hcl:"mysql_ssl_mode"` - CommandRestartService string `hcl:"mysql_restart_service"` - MysqlConfDir string `hcl:"mysql_cnf_dir"` - ReleemConfDir string `hcl:"releem_cnf_dir"` - ReleemDir string `hcl:"releem_dir"` - MemoryLimit int `hcl:"memory_limit"` - InstanceType string `hcl:"instance_type"` - AwsRegion string `hcl:"aws_region"` - AwsRDSDB string `hcl:"aws_rds_db"` - CollectExplain bool `hcl:"collect_explain"` + Debug bool `hcl:"debug"` + Env string `hcl:"env"` + Hostname string `hcl:"hostname"` + ApiKey string `hcl:"apikey"` + MetricsPeriod time.Duration `hcl:"interval_seconds"` + ReadConfigPeriod time.Duration `hcl:"interval_read_config_seconds"` + GenerateConfigPeriod time.Duration `hcl:"interval_generate_config_seconds"` + QueryOptimizationPeriod time.Duration `hcl:"interval_query_optimization_seconds"` + MysqlPassword string `hcl:"mysql_password"` + MysqlUser string `hcl:"mysql_user"` + MysqlHost string `hcl:"mysql_host"` + MysqlPort string `hcl:"mysql_port"` + MysqlSslMode bool `hcl:"mysql_ssl_mode"` + CommandRestartService string `hcl:"mysql_restart_service"` + MysqlConfDir string `hcl:"mysql_cnf_dir"` + ReleemConfDir string `hcl:"releem_cnf_dir"` + ReleemDir string 
`hcl:"releem_dir"` + MemoryLimit int `hcl:"memory_limit"` + InstanceType string `hcl:"instance_type"` + AwsRegion string `hcl:"aws_region"` + AwsRDSDB string `hcl:"aws_rds_db"` + QueryOptimization bool `hcl:"query_optimization"` } func LoadConfig(filename string, logger logging.Logger) (*Config, error) { @@ -59,14 +60,17 @@ func LoadConfigFromString(data string, logger logging.Logger) (*Config, error) { if err != nil { return nil, err } - if config.TimePeriodSeconds == 0 { - config.TimePeriodSeconds = 60 + if config.MetricsPeriod == 0 { + config.MetricsPeriod = 60 } - if config.ReadConfigSeconds == 0 { - config.ReadConfigSeconds = 3600 + if config.ReadConfigPeriod == 0 { + config.ReadConfigPeriod = 3600 } - if config.GenerateConfigSeconds == 0 { - config.GenerateConfigSeconds = 43200 + if config.GenerateConfigPeriod == 0 { + config.GenerateConfigPeriod = 43200 + } + if config.QueryOptimizationPeriod == 0 { + config.QueryOptimizationPeriod = 3600 } if config.MysqlHost == "" { config.MysqlHost = "127.0.0.1" diff --git a/current_version_agent b/current_version_agent index 5ce8b39..c1af674 100644 --- a/current_version_agent +++ b/current_version_agent @@ -1 +1 @@ -1.18.1 \ No newline at end of file +1.19.0 \ No newline at end of file diff --git a/docker/releem.conf.tpl b/docker/releem.conf.tpl index 252733e..fe28621 100644 --- a/docker/releem.conf.tpl +++ b/docker/releem.conf.tpl @@ -8,18 +8,22 @@ hostname="${RELEEM_HOSTNAME}" # Defaults to 0, Mysql memory usage limit. memory_limit=${MEMORY_LIMIT:-0} -# TimePeriodSeconds time.Duration `hcl:"interval_seconds"` +# MetricsPeriod time.Duration `hcl:"interval_seconds"` # Defaults to 30 seconds, how often metrics are collected. interval_seconds=60 -# ReadConfigSeconds time.Duration `hcl:"interval_read_config_seconds"` +# ReadConfigPeriod time.Duration `hcl:"interval_read_config_seconds"` # Defaults to 3600 seconds, how often to update the values from the config. 
interval_read_config_seconds=3600 -# GenerateConfigSeconds time.Duration `hcl:"interval_generate_config_seconds"` +# GenerateConfigPeriod time.Duration `hcl:"interval_generate_config_seconds"` # Defaults to 43200 seconds, how often to generate recommend the config. interval_generate_config_seconds=${RELEEM_INTERVAL_COLLECT_ALL_METRICS:-43200} +# QueryOptimizationPeriod time.Duration `hcl:"interval_query_optimization_seconds"` +# Defaults to 3600 seconds, how often query metrics are collected. +interval_query_optimization_seconds=3600 + # MysqlUser string`hcl:"mysql_user"` # Mysql user name for collection metrics. mysql_user="${DB_USER:-releem}" @@ -68,4 +72,6 @@ env="${RELEEM_ENV:-prod}" # Releem Debug messages debug=${RELEEM_DEBUG:-false} -collect_explain=${RELEEM_QUERY_OPTIMIZATION:-false} \ No newline at end of file +# QueryOptimization bool `hcl:"query_optimization"` +# Enables collection of EXPLAIN plans for queries (query optimization). +query_optimization=${RELEEM_QUERY_OPTIMIZATION:-false} \ No newline at end of file diff --git a/install.sh b/install.sh index b3e079f..43512df 100644 --- a/install.sh +++ b/install.sh @@ -1,5 +1,5 @@ #!/bin/bash -# install.sh - Version 1.18.1 +# install.sh - Version 1.19.0 # (C) Releem, Inc 2022 # All rights reserved @@ -7,7 +7,7 @@ # using the package manager.
set -e -install_script_version=1.18.1 +install_script_version=1.19.0 logfile="releem-install.log" WORKDIR="/opt/releem" @@ -321,6 +321,19 @@ else else printf "\033[31m\n This database version is too old.\033[0m\n" fi + + if mysql ${root_connection_string} --user=root --password=${RELEEM_MYSQL_ROOT_PASSWORD} -Be "GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '${RELEEM_MYSQL_LOGIN}'@'${mysql_user_host}';" + then + echo "Successfully GRANT" > /dev/null + else + if mysql ${root_connection_string} --user=root --password=${RELEEM_MYSQL_ROOT_PASSWORD} -Be "GRANT SUPER ON *.* TO '${RELEEM_MYSQL_LOGIN}'@'${mysql_user_host}';" + then + echo "Successfully GRANT" > /dev/null + else + printf "\033[31m\n Error granting privileges to apply without restarting.\033[0m\n" + fi + fi + if [ -n $RELEEM_QUERY_OPTIMIZATION ]; then mysql ${root_connection_string} --user=root --password=${RELEEM_MYSQL_ROOT_PASSWORD} -Be "GRANT SELECT ON *.* TO '${RELEEM_MYSQL_LOGIN}'@'${mysql_user_host}';" @@ -449,7 +462,7 @@ if [ -n "$RELEEM_MYSQL_SSL_MODE" ]; then echo "mysql_ssl_mode=$RELEEM_MYSQL_SSL_MODE" | $sudo_cmd tee -a $CONF >/dev/null fi if [ -n "$RELEEM_QUERY_OPTIMIZATION" ]; then - echo "collect_explain=$RELEEM_QUERY_OPTIMIZATION" | $sudo_cmd tee -a $CONF >/dev/null + echo "query_optimization=$RELEEM_QUERY_OPTIMIZATION" | $sudo_cmd tee -a $CONF >/dev/null fi echo "interval_seconds=60" | $sudo_cmd tee -a $CONF >/dev/null echo "interval_read_config_seconds=3600" | $sudo_cmd tee -a $CONF >/dev/null diff --git a/main.go b/main.go index dc43ba7..9ebcdd2 100644 --- a/main.go +++ b/main.go @@ -47,8 +47,8 @@ type Service struct { // Manage by daemon commands or run the daemon func (service *Service) Manage(logger logging.Logger, configFile string, command []string, TypeConfiguration string, AgentEvent string, AgentTask string) (string, error) { - var gatherers, gatherers_configuration []m.MetricsGatherer - var Mode m.Mode + var gatherers, gatherers_configuration, gatherers_query_optimization 
[]m.MetricsGatherer + var Mode m.ModeT var configuration *config.Config usage := "Usage: myservice install | remove | start | stop | status" @@ -156,14 +156,18 @@ func (service *Service) Manage(logger logging.Logger, configFile string, command defer config.DB.Close() //Init repeaters - repeaters := make(map[string]m.MetricsRepeater) - repeaters["Metrics"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "Metrics", ModeType: ""})) - repeaters["Configurations"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) - repeaters["Event"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) - repeaters["TaskGet"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "TaskGet", ModeType: ""})) - repeaters["TaskStatus"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "TaskStatus", ModeType: ""})) - repeaters["TaskSet"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) - repeaters["GetConfigurationJson"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "Configurations", ModeType: "get-json"})) + // repeaters := make(map[string]m.MetricsRepeater) + // repeaters["Metrics"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "Metrics", ModeType: ""})) + // repeaters["Configurations"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) + // repeaters["Event"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) + // repeaters["TaskGet"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "TaskGet", ModeType: ""})) + // repeaters["TaskStatus"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "TaskStatus", ModeType: ""})) + // repeaters["TaskSet"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, Mode)) + // 
repeaters["GetConfigurationJson"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "Configurations", ModeType: "get-json"})) + // repeaters["QueryOptimization"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "Metrics", ModeType: "QuerysOptimization"})) + // repeaters["QueriesOptimization"] = m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration, m.Mode{Name: "TaskSet", ModeType: "queries_optimization"})) + //var repeaters m.MetricsRepeater + repeaters := m.MetricsRepeater(r.NewReleemConfigurationsRepeater(configuration)) //Init gatherers gatherers = append(gatherers, @@ -172,10 +176,9 @@ func (service *Service) Manage(logger logging.Logger, configFile string, command m.NewDbMetricsBaseGatherer(nil, configuration), m.NewAgentMetricsGatherer(nil, configuration)) gatherers_configuration = append(gatherers_configuration, m.NewDbMetricsGatherer(nil, configuration)) - if Mode.Name == "TaskSet" && Mode.ModeType == "collect_queries" { - gatherers = append(gatherers, m.NewDbCollectQueries(nil, configuration)) - } - m.RunWorker(gatherers, gatherers_configuration, repeaters, nil, configuration, configFile, Mode) + gatherers_query_optimization = append(gatherers_query_optimization, m.NewDbCollectQueriesOptimization(nil, configuration)) + + m.RunWorker(gatherers, gatherers_configuration, gatherers_query_optimization, repeaters, nil, configuration, configFile, Mode) // never happen, but need to complete code return usage, nil @@ -187,7 +190,7 @@ func main() { configFile := flag.String("config", "/opt/releem/releem.conf", "Releem agent config") SetConfigRun := flag.Bool("f", false, "Releem agent generate config") - GetConfigRun := flag.Bool("c", false, "Releem agent generate config") + GetConfigRun := flag.Bool("c", false, "Releem agent get config") AgentEvent := flag.String("event", "", "Releem agent type event") AgentTask := flag.String("task", "", "Releem agent task name") diff --git 
a/metrics/Metrics.go b/metrics/Metrics.go index ceb5025..310e0b2 100644 --- a/metrics/Metrics.go +++ b/metrics/Metrics.go @@ -18,7 +18,7 @@ type MetricValue struct { } type MetricGroupValue map[string]interface{} -type Mode struct { +type ModeT struct { Name string ModeType string } @@ -78,7 +78,7 @@ type MetricsGatherer interface { } type MetricsRepeater interface { - ProcessMetrics(context MetricContext, metrics Metrics) (interface{}, error) + ProcessMetrics(context MetricContext, metrics Metrics, Mode ModeT) (interface{}, error) } func MapJoin(map1, map2 MetricGroupValue) MetricGroupValue { diff --git a/metrics/dbCollectQueries.go b/metrics/dbCollectQueries.go index 22fce27..e8e917b 100644 --- a/metrics/dbCollectQueries.go +++ b/metrics/dbCollectQueries.go @@ -11,29 +11,29 @@ import ( "github.com/advantageous/go-logback/logging" ) -type DbCollectQueries struct { +type DbCollectQueriesOptimization struct { logger logging.Logger configuration *config.Config } -func NewDbCollectQueries(logger logging.Logger, configuration *config.Config) *DbCollectQueries { +func NewDbCollectQueriesOptimization(logger logging.Logger, configuration *config.Config) *DbCollectQueriesOptimization { if logger == nil { if configuration.Debug { - logger = logging.NewSimpleDebugLogger("DbCollectQueries") + logger = logging.NewSimpleDebugLogger("DbCollectQueriesOptimization") } else { - logger = logging.NewSimpleLogger("DbCollectQueries") + logger = logging.NewSimpleLogger("DbCollectQueriesOptimization") } } - return &DbCollectQueries{ + return &DbCollectQueriesOptimization{ logger: logger, configuration: configuration, } } -func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { - defer HandlePanic(DbCollectQueries.configuration, DbCollectQueries.logger) +func (DbCollectQueriesOptimization *DbCollectQueriesOptimization) GetMetrics(metrics *Metrics) error { + defer HandlePanic(DbCollectQueriesOptimization.configuration, DbCollectQueriesOptimization.logger) { var 
output []MetricGroupValue @@ -46,18 +46,18 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { if err != nil { if err != sql.ErrNoRows { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } rows, err = config.DB.Query("SELECT IFNULL(schema_name, 'NULL') as schema_name, IFNULL(digest, 'NULL') as query_id, IFNULL(digest_text, 'NULL') as query, count_star as calls, round(avg_timer_wait/1000000, 0) as avg_time_us, round(SUM_TIMER_WAIT/1000000, 0) as sum_time_us, IFNULL(SUM_LOCK_TIME, 'NULL') as SUM_LOCK_TIME, IFNULL(SUM_ERRORS, 'NULL') as SUM_ERRORS, IFNULL(SUM_WARNINGS, 'NULL') as SUM_WARNINGS, IFNULL(SUM_ROWS_AFFECTED, 'NULL') as SUM_ROWS_AFFECTED, IFNULL(SUM_ROWS_SENT, 'NULL') as SUM_ROWS_SENT, IFNULL(SUM_ROWS_EXAMINED, 'NULL') as SUM_ROWS_EXAMINED, IFNULL(SUM_CREATED_TMP_DISK_TABLES, 'NULL') as SUM_CREATED_TMP_DISK_TABLES, IFNULL(SUM_CREATED_TMP_TABLES, 'NULL') as SUM_CREATED_TMP_TABLES, IFNULL(SUM_SELECT_FULL_JOIN, 'NULL') as SUM_SELECT_FULL_JOIN, IFNULL(SUM_SELECT_FULL_RANGE_JOIN, 'NULL') as SUM_SELECT_FULL_RANGE_JOIN, IFNULL(SUM_SELECT_RANGE, 'NULL') as SUM_SELECT_RANGE, IFNULL(SUM_SELECT_RANGE_CHECK, 'NULL') as SUM_SELECT_RANGE_CHECK, IFNULL(SUM_SELECT_SCAN, 'NULL') as SUM_SELECT_SCAN, IFNULL(SUM_SORT_MERGE_PASSES, 'NULL') as SUM_SORT_MERGE_PASSES, IFNULL(SUM_SORT_RANGE, 'NULL') as SUM_SORT_RANGE, IFNULL(SUM_SORT_ROWS, 'NULL') as SUM_SORT_ROWS, IFNULL(SUM_SORT_SCAN, 'NULL') as SUM_SORT_SCAN, IFNULL(SUM_NO_INDEX_USED, 'NULL') as SUM_NO_INDEX_USED, IFNULL(SUM_NO_GOOD_INDEX_USED, 'NULL') as SUM_NO_GOOD_INDEX_USED FROM performance_schema.events_statements_summary_by_digest") if err != nil { if err != sql.ErrNoRows { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } } else { for rows.Next() { err := rows.Scan(&schema_name, &query_id, &query, &calls, &avg_time_us, &sum_time_us, &SUM_LOCK_TIME, &SUM_ERRORS, &SUM_WARNINGS, &SUM_ROWS_AFFECTED, &SUM_ROWS_SENT, 
&SUM_ROWS_EXAMINED, &SUM_CREATED_TMP_DISK_TABLES, &SUM_CREATED_TMP_TABLES, &SUM_SELECT_FULL_JOIN, &SUM_SELECT_FULL_RANGE_JOIN, &SUM_SELECT_RANGE, &SUM_SELECT_RANGE_CHECK, &SUM_SELECT_SCAN, &SUM_SORT_MERGE_PASSES, &SUM_SORT_RANGE, &SUM_SORT_ROWS, &SUM_SORT_SCAN, &SUM_NO_INDEX_USED, &SUM_NO_GOOD_INDEX_USED) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } output_digest[query_id] = MetricGroupValue{"schema_name": schema_name, "query_id": query_id, "query": query, "calls": calls, "avg_time_us": avg_time_us, "sum_time_us": sum_time_us, "SUM_LOCK_TIME": SUM_LOCK_TIME, "SUM_ERRORS": SUM_ERRORS, "SUM_WARNINGS": SUM_WARNINGS, "SUM_ROWS_AFFECTED": SUM_ROWS_AFFECTED, "SUM_ROWS_SENT": SUM_ROWS_SENT, "SUM_ROWS_EXAMINED": SUM_ROWS_EXAMINED, "SUM_CREATED_TMP_DISK_TABLES": SUM_CREATED_TMP_DISK_TABLES, "SUM_CREATED_TMP_TABLES": SUM_CREATED_TMP_TABLES, "SUM_SELECT_FULL_JOIN": SUM_SELECT_FULL_JOIN, "SUM_SELECT_FULL_RANGE_JOIN": SUM_SELECT_FULL_RANGE_JOIN, "SUM_SELECT_RANGE": SUM_SELECT_RANGE, "SUM_SELECT_RANGE_CHECK": SUM_SELECT_RANGE_CHECK, "SUM_SELECT_SCAN": SUM_SELECT_SCAN, "SUM_SORT_MERGE_PASSES": SUM_SORT_MERGE_PASSES, "SUM_SORT_RANGE": SUM_SORT_RANGE, "SUM_SORT_ROWS": SUM_SORT_ROWS, "SUM_SORT_SCAN": SUM_SORT_SCAN, "SUM_NO_INDEX_USED": SUM_NO_INDEX_USED, "SUM_NO_GOOD_INDEX_USED": SUM_NO_GOOD_INDEX_USED} @@ -68,14 +68,14 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { for rows.Next() { err := rows.Scan(&schema_name, &query_id, &query, &query_text, &calls, &avg_time_us, &sum_time_us, &SUM_LOCK_TIME, &SUM_ERRORS, &SUM_WARNINGS, &SUM_ROWS_AFFECTED, &SUM_ROWS_SENT, &SUM_ROWS_EXAMINED, &SUM_CREATED_TMP_DISK_TABLES, &SUM_CREATED_TMP_TABLES, &SUM_SELECT_FULL_JOIN, &SUM_SELECT_FULL_RANGE_JOIN, &SUM_SELECT_RANGE, &SUM_SELECT_RANGE_CHECK, &SUM_SELECT_SCAN, &SUM_SORT_MERGE_PASSES, &SUM_SORT_RANGE, &SUM_SORT_ROWS, &SUM_SORT_SCAN, &SUM_NO_INDEX_USED, &SUM_NO_GOOD_INDEX_USED) if err != nil { - 
DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } output_digest[query_id] = MetricGroupValue{"schema_name": schema_name, "query_id": query_id, "query": query, "query_text": query_text, "calls": calls, "avg_time_us": avg_time_us, "sum_time_us": sum_time_us, "SUM_LOCK_TIME": SUM_LOCK_TIME, "SUM_ERRORS": SUM_ERRORS, "SUM_WARNINGS": SUM_WARNINGS, "SUM_ROWS_AFFECTED": SUM_ROWS_AFFECTED, "SUM_ROWS_SENT": SUM_ROWS_SENT, "SUM_ROWS_EXAMINED": SUM_ROWS_EXAMINED, "SUM_CREATED_TMP_DISK_TABLES": SUM_CREATED_TMP_DISK_TABLES, "SUM_CREATED_TMP_TABLES": SUM_CREATED_TMP_TABLES, "SUM_SELECT_FULL_JOIN": SUM_SELECT_FULL_JOIN, "SUM_SELECT_FULL_RANGE_JOIN": SUM_SELECT_FULL_RANGE_JOIN, "SUM_SELECT_RANGE": SUM_SELECT_RANGE, "SUM_SELECT_RANGE_CHECK": SUM_SELECT_RANGE_CHECK, "SUM_SELECT_SCAN": SUM_SELECT_SCAN, "SUM_SORT_MERGE_PASSES": SUM_SORT_MERGE_PASSES, "SUM_SORT_RANGE": SUM_SORT_RANGE, "SUM_SORT_ROWS": SUM_SORT_ROWS, "SUM_SORT_SCAN": SUM_SORT_SCAN, "SUM_NO_INDEX_USED": SUM_NO_INDEX_USED, "SUM_NO_GOOD_INDEX_USED": SUM_NO_GOOD_INDEX_USED} } - if DbCollectQueries.configuration.CollectExplain { - CollectionExplain(output_digest, "sum_time_us", DbCollectQueries.logger, DbCollectQueries.configuration) - CollectionExplain(output_digest, "avg_time_us", DbCollectQueries.logger, DbCollectQueries.configuration) + if DbCollectQueriesOptimization.configuration.QueryOptimization { + CollectionExplain(output_digest, "sum_time_us", DbCollectQueriesOptimization.logger, DbCollectQueriesOptimization.configuration) + CollectionExplain(output_digest, "avg_time_us", DbCollectQueriesOptimization.logger, DbCollectQueriesOptimization.configuration) } } for _, value := range output_digest { @@ -105,12 +105,12 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { rows, err := config.DB.Query("SELECT IFNULL(TABLE_NAME, 'NULL') as TABLE_NAME, IFNULL(TABLE_SCHEMA, 'NULL') as TABLE_SCHEMA, IFNULL(ENGINE, 'NULL') as ENGINE, IFNULL(TABLE_ROWS, 
'NULL') as TABLE_ROWS, IFNULL(AVG_ROW_LENGTH, 'NULL') as AVG_ROW_LENGTH, IFNULL(DATA_LENGTH, 'NULL') as DATA_LENGTH, IFNULL(INDEX_LENGTH, 'NULL') as INDEX_LENGTH, IFNULL(TABLE_COLLATION, 'NULL') as TABLE_COLLATION FROM information_schema.tables") if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } else { for rows.Next() { err := rows.Scan(&information_schema_table.TABLE_NAME, &information_schema_table.TABLE_SCHEMA, &information_schema_table.ENGINE, &information_schema_table.TABLE_ROWS, &information_schema_table.AVG_ROW_LENGTH, &information_schema_table.DATA_LENGTH, &information_schema_table.INDEX_LENGTH, &information_schema_table.TABLE_COLLATION) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } table := MetricGroupValue{"TABLE_NAME": information_schema_table.TABLE_NAME, "TABLE_SCHEMA": information_schema_table.TABLE_SCHEMA, "ENGINE": information_schema_table.ENGINE, "TABLE_ROWS": information_schema_table.TABLE_ROWS, "AVG_ROW_LENGTH": information_schema_table.AVG_ROW_LENGTH, "DATA_LENGTH": information_schema_table.DATA_LENGTH, "INDEX_LENGTH": information_schema_table.INDEX_LENGTH, "TABLE_COLLATION": information_schema_table.TABLE_COLLATION} @@ -138,12 +138,12 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { rows, err := config.DB.Query("SELECT IFNULL(TABLE_SCHEMA, 'NULL') as TABLE_SCHEMA, IFNULL(TABLE_NAME, 'NULL') as TABLE_NAME, IFNULL(COLUMN_NAME, 'NULL') as COLUMN_NAME, IFNULL(ORDINAL_POSITION, 'NULL') as ORDINAL_POSITION, IFNULL(IS_NULLABLE, 'NULL') as IS_NULLABLE, IFNULL(DATA_TYPE, 'NULL') as DATA_TYPE, IFNULL(CHARACTER_MAXIMUM_LENGTH, 'NULL') as CHARACTER_MAXIMUM_LENGTH, IFNULL(NUMERIC_PRECISION, 'NULL') as NUMERIC_PRECISION, IFNULL(NUMERIC_SCALE, 'NULL') as NUMERIC_SCALE, IFNULL(CHARACTER_SET_NAME, 'NULL') as CHARACTER_SET_NAME FROM information_schema.columns") if err != nil { - 
DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } else { for rows.Next() { err := rows.Scan(&information_schema_column.TABLE_SCHEMA, &information_schema_column.TABLE_NAME, &information_schema_column.COLUMN_NAME, &information_schema_column.ORDINAL_POSITION, &information_schema_column.IS_NULLABLE, &information_schema_column.DATA_TYPE, &information_schema_column.CHARACTER_MAXIMUM_LENGTH, &information_schema_column.NUMERIC_PRECISION, &information_schema_column.NUMERIC_SCALE, &information_schema_column.CHARACTER_SET_NAME) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } column := MetricGroupValue{"TABLE_SCHEMA": information_schema_column.TABLE_SCHEMA, "TABLE_NAME": information_schema_column.TABLE_NAME, "COLUMN_NAME": information_schema_column.COLUMN_NAME, "ORDINAL_POSITION": information_schema_column.ORDINAL_POSITION, "IS_NULLABLE": information_schema_column.IS_NULLABLE, "DATA_TYPE": information_schema_column.DATA_TYPE, "CHARACTER_MAXIMUM_LENGTH": information_schema_column.CHARACTER_MAXIMUM_LENGTH, "NUMERIC_PRECISION": information_schema_column.NUMERIC_PRECISION, "NUMERIC_SCALE": information_schema_column.NUMERIC_SCALE, "CHARACTER_SET_NAME": information_schema_column.CHARACTER_SET_NAME} @@ -174,15 +174,15 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { rows, err := config.DB.Query("SELECT IFNULL(TABLE_SCHEMA, 'NULL') as TABLE_SCHEMA, IFNULL(TABLE_NAME, 'NULL') as TABLE_NAME, IFNULL(INDEX_NAME, 'NULL') as INDEX_NAME, IFNULL(NON_UNIQUE, 'NULL') as NON_UNIQUE, IFNULL(SEQ_IN_INDEX, 'NULL') as SEQ_IN_INDEX, IFNULL(COLUMN_NAME, 'NULL') as COLUMN_NAME, IFNULL(COLLATION, 'NULL') as COLLATION, IFNULL(CARDINALITY, 'NULL') as CARDINALITY, IFNULL(SUB_PART, 'NULL') as SUB_PART, IFNULL(PACKED, 'NULL') as PACKED, IFNULL(NULLABLE, 'NULL') as NULLABLE, IFNULL(INDEX_TYPE, 'NULL') as INDEX_TYPE, IFNULL(EXPRESSION, 'NULL') as EXPRESSION FROM 
information_schema.statistics") if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) rows, err = config.DB.Query("SELECT IFNULL(TABLE_SCHEMA, 'NULL') as TABLE_SCHEMA, IFNULL(TABLE_NAME, 'NULL') as TABLE_NAME, IFNULL(INDEX_NAME, 'NULL') as INDEX_NAME, IFNULL(NON_UNIQUE, 'NULL') as NON_UNIQUE, IFNULL(SEQ_IN_INDEX, 'NULL') as SEQ_IN_INDEX, IFNULL(COLUMN_NAME, 'NULL') as COLUMN_NAME, IFNULL(COLLATION, 'NULL') as COLLATION, IFNULL(CARDINALITY, 'NULL') as CARDINALITY, IFNULL(SUB_PART, 'NULL') as SUB_PART, IFNULL(PACKED, 'NULL') as PACKED, IFNULL(NULLABLE, 'NULL') as NULLABLE, IFNULL(INDEX_TYPE, 'NULL') as INDEX_TYPE FROM information_schema.statistics") if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } else { for rows.Next() { err := rows.Scan(&information_schema_index.TABLE_SCHEMA, &information_schema_index.TABLE_NAME, &information_schema_index.INDEX_NAME, &information_schema_index.NON_UNIQUE, &information_schema_index.SEQ_IN_INDEX, &information_schema_index.COLUMN_NAME, &information_schema_index.COLLATION, &information_schema_index.CARDINALITY, &information_schema_index.SUB_PART, &information_schema_index.PACKED, &information_schema_index.NULLABLE, &information_schema_index.INDEX_TYPE) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } index := MetricGroupValue{"TABLE_SCHEMA": information_schema_index.TABLE_SCHEMA, "TABLE_NAME": information_schema_index.TABLE_NAME, "INDEX_NAME": information_schema_index.INDEX_NAME, "NON_UNIQUE": information_schema_index.NON_UNIQUE, "SEQ_IN_INDEX": information_schema_index.SEQ_IN_INDEX, "COLUMN_NAME": information_schema_index.COLUMN_NAME, "COLLATION": information_schema_index.COLLATION, "CARDINALITY": information_schema_index.CARDINALITY, "SUB_PART": information_schema_index.SUB_PART, "PACKED": information_schema_index.PACKED, "NULLABLE": information_schema_index.NULLABLE, 
"INDEX_TYPE": information_schema_index.INDEX_TYPE} @@ -193,7 +193,7 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { for rows.Next() { err := rows.Scan(&information_schema_index.TABLE_SCHEMA, &information_schema_index.TABLE_NAME, &information_schema_index.INDEX_NAME, &information_schema_index.NON_UNIQUE, &information_schema_index.SEQ_IN_INDEX, &information_schema_index.COLUMN_NAME, &information_schema_index.COLLATION, &information_schema_index.CARDINALITY, &information_schema_index.SUB_PART, &information_schema_index.PACKED, &information_schema_index.NULLABLE, &information_schema_index.INDEX_TYPE, &information_schema_index.EXPRESSION) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } index := MetricGroupValue{"TABLE_SCHEMA": information_schema_index.TABLE_SCHEMA, "TABLE_NAME": information_schema_index.TABLE_NAME, "INDEX_NAME": information_schema_index.INDEX_NAME, "NON_UNIQUE": information_schema_index.NON_UNIQUE, "SEQ_IN_INDEX": information_schema_index.SEQ_IN_INDEX, "COLUMN_NAME": information_schema_index.COLUMN_NAME, "COLLATION": information_schema_index.COLLATION, "CARDINALITY": information_schema_index.CARDINALITY, "SUB_PART": information_schema_index.SUB_PART, "PACKED": information_schema_index.PACKED, "NULLABLE": information_schema_index.NULLABLE, "INDEX_TYPE": information_schema_index.INDEX_TYPE, "EXPRESSION": information_schema_index.EXPRESSION} @@ -250,12 +250,12 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { rows, err := config.DB.Query("SELECT IFNULL(OBJECT_TYPE, 'NULL') as OBJECT_TYPE, IFNULL(OBJECT_SCHEMA, 'NULL') as OBJECT_SCHEMA, IFNULL(OBJECT_NAME, 'NULL') as OBJECT_NAME, IFNULL(INDEX_NAME, 'NULL') as INDEX_NAME, IFNULL(COUNT_STAR, 'NULL') as COUNT_STAR, IFNULL(SUM_TIMER_WAIT, 'NULL') as SUM_TIMER_WAIT, IFNULL(MIN_TIMER_WAIT, 'NULL') as MIN_TIMER_WAIT, IFNULL(AVG_TIMER_WAIT, 'NULL') as AVG_TIMER_WAIT, 
IFNULL(MAX_TIMER_WAIT, 'NULL') as MAX_TIMER_WAIT, IFNULL(COUNT_READ, 'NULL') as COUNT_READ, IFNULL(SUM_TIMER_READ, 'NULL') as SUM_TIMER_READ, IFNULL(MIN_TIMER_READ, 'NULL') as MIN_TIMER_READ, IFNULL(AVG_TIMER_READ, 'NULL') as AVG_TIMER_READ, IFNULL(MAX_TIMER_READ, 'NULL') as MAX_TIMER_READ, IFNULL(COUNT_WRITE, 'NULL') as COUNT_WRITE, IFNULL(SUM_TIMER_WRITE, 'NULL') as SUM_TIMER_WRITE, IFNULL(MIN_TIMER_WRITE, 'NULL') as MIN_TIMER_WRITE, IFNULL(AVG_TIMER_WRITE, 'NULL') as AVG_TIMER_WRITE, IFNULL(MAX_TIMER_WRITE, 'NULL') as MAX_TIMER_WRITE, IFNULL(COUNT_FETCH, 'NULL') as COUNT_FETCH, IFNULL(SUM_TIMER_FETCH, 'NULL') as SUM_TIMER_FETCH, IFNULL(MIN_TIMER_FETCH, 'NULL') as MIN_TIMER_FETCH, IFNULL(AVG_TIMER_FETCH, 'NULL') as AVG_TIMER_FETCH, IFNULL(MAX_TIMER_FETCH, 'NULL') as MAX_TIMER_FETCH, IFNULL(COUNT_INSERT, 'NULL') as COUNT_INSERT, IFNULL(SUM_TIMER_INSERT, 'NULL') as SUM_TIMER_INSERT, IFNULL(MIN_TIMER_INSERT, 'NULL') as MIN_TIMER_INSERT, IFNULL(AVG_TIMER_INSERT, 'NULL') as AVG_TIMER_INSERT, IFNULL(MAX_TIMER_INSERT, 'NULL') as MAX_TIMER_INSERT, IFNULL(COUNT_UPDATE, 'NULL') as COUNT_UPDATE, IFNULL(SUM_TIMER_UPDATE, 'NULL') as SUM_TIMER_UPDATE, IFNULL(MIN_TIMER_UPDATE, 'NULL') as MIN_TIMER_UPDATE, IFNULL(AVG_TIMER_UPDATE, 'NULL') as AVG_TIMER_UPDATE, IFNULL(MAX_TIMER_UPDATE, 'NULL') as MAX_TIMER_UPDATE, IFNULL(COUNT_DELETE, 'NULL') as COUNT_DELETE, IFNULL(SUM_TIMER_DELETE, 'NULL') as SUM_TIMER_DELETE, IFNULL(MIN_TIMER_DELETE, 'NULL') as MIN_TIMER_DELETE, IFNULL(AVG_TIMER_DELETE, 'NULL') as AVG_TIMER_DELETE, IFNULL(MAX_TIMER_DELETE, 'NULL') as MAX_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_index_usage") if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } else { for rows.Next() { err := rows.Scan(&performance_schema_table_io_waits_summary_by_index_usage.OBJECT_TYPE, &performance_schema_table_io_waits_summary_by_index_usage.OBJECT_SCHEMA, 
&performance_schema_table_io_waits_summary_by_index_usage.OBJECT_NAME, &performance_schema_table_io_waits_summary_by_index_usage.INDEX_NAME, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_STAR, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_WAIT, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_WAIT, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_WAIT, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_WAIT, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_READ, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_READ, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_READ, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_READ, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_READ, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_WRITE, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_WRITE, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_WRITE, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_WRITE, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_WRITE, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_FETCH, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_FETCH, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_FETCH, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_FETCH, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_FETCH, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_INSERT, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_INSERT, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_INSERT, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_INSERT, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_INSERT, 
&performance_schema_table_io_waits_summary_by_index_usage.COUNT_UPDATE, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_UPDATE, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_UPDATE, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_UPDATE, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_UPDATE, &performance_schema_table_io_waits_summary_by_index_usage.COUNT_DELETE, &performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_DELETE, &performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_DELETE, &performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_DELETE, &performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_DELETE) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } table_io_waits_summary_by_index_usage := MetricGroupValue{"OBJECT_TYPE": performance_schema_table_io_waits_summary_by_index_usage.OBJECT_TYPE, "OBJECT_SCHEMA": performance_schema_table_io_waits_summary_by_index_usage.OBJECT_SCHEMA, "OBJECT_NAME": performance_schema_table_io_waits_summary_by_index_usage.OBJECT_NAME, "INDEX_NAME": performance_schema_table_io_waits_summary_by_index_usage.INDEX_NAME, "COUNT_STAR": performance_schema_table_io_waits_summary_by_index_usage.COUNT_STAR, "SUM_TIMER_WAIT": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_WAIT, "MIN_TIMER_WAIT": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_WAIT, "AVG_TIMER_WAIT": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_WAIT, "MAX_TIMER_WAIT": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_WAIT, "COUNT_READ": performance_schema_table_io_waits_summary_by_index_usage.COUNT_READ, "SUM_TIMER_READ": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_READ, "MIN_TIMER_READ": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_READ, "AVG_TIMER_READ": 
performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_READ, "MAX_TIMER_READ": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_READ, "COUNT_WRITE": performance_schema_table_io_waits_summary_by_index_usage.COUNT_WRITE, "SUM_TIMER_WRITE": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_WRITE, "MIN_TIMER_WRITE": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_WRITE, "AVG_TIMER_WRITE": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_WRITE, "MAX_TIMER_WRITE": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_WRITE, "COUNT_FETCH": performance_schema_table_io_waits_summary_by_index_usage.COUNT_FETCH, "SUM_TIMER_FETCH": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_FETCH, "MIN_TIMER_FETCH": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_FETCH, "AVG_TIMER_FETCH": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_FETCH, "MAX_TIMER_FETCH": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_FETCH, "COUNT_INSERT": performance_schema_table_io_waits_summary_by_index_usage.COUNT_INSERT, "SUM_TIMER_INSERT": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_INSERT, "MIN_TIMER_INSERT": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_INSERT, "AVG_TIMER_INSERT": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_INSERT, "MAX_TIMER_INSERT": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_INSERT, "COUNT_UPDATE": performance_schema_table_io_waits_summary_by_index_usage.COUNT_UPDATE, "SUM_TIMER_UPDATE": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_UPDATE, "MIN_TIMER_UPDATE": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_UPDATE, "AVG_TIMER_UPDATE": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_UPDATE, "MAX_TIMER_UPDATE": 
performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_UPDATE, "COUNT_DELETE": performance_schema_table_io_waits_summary_by_index_usage.COUNT_DELETE, "SUM_TIMER_DELETE": performance_schema_table_io_waits_summary_by_index_usage.SUM_TIMER_DELETE, "MIN_TIMER_DELETE": performance_schema_table_io_waits_summary_by_index_usage.MIN_TIMER_DELETE, "AVG_TIMER_DELETE": performance_schema_table_io_waits_summary_by_index_usage.AVG_TIMER_DELETE, "MAX_TIMER_DELETE": performance_schema_table_io_waits_summary_by_index_usage.MAX_TIMER_DELETE} @@ -298,12 +298,12 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { rows, err := config.DB.Query("SELECT IFNULL(FILE_NAME, 'NULL') as FILE_NAME, IFNULL(EVENT_NAME, 'NULL') as EVENT_NAME, IFNULL(OBJECT_INSTANCE_BEGIN, 'NULL') as OBJECT_INSTANCE_BEGIN, IFNULL(COUNT_STAR, 'NULL') as COUNT_STAR, IFNULL(SUM_TIMER_WAIT, 'NULL') as SUM_TIMER_WAIT, IFNULL(MIN_TIMER_WAIT, 'NULL') as MIN_TIMER_WAIT, IFNULL(AVG_TIMER_WAIT, 'NULL') as AVG_TIMER_WAIT, IFNULL(MAX_TIMER_WAIT, 'NULL') as MAX_TIMER_WAIT, IFNULL(COUNT_READ, 'NULL') as COUNT_READ, IFNULL(SUM_TIMER_READ, 'NULL') as SUM_TIMER_READ, IFNULL(MIN_TIMER_READ, 'NULL') as MIN_TIMER_READ, IFNULL(AVG_TIMER_READ, 'NULL') as AVG_TIMER_READ, IFNULL(MAX_TIMER_READ, 'NULL') as MAX_TIMER_READ, IFNULL(SUM_NUMBER_OF_BYTES_READ, 'NULL') as SUM_NUMBER_OF_BYTES_READ, IFNULL(COUNT_WRITE, 'NULL') as COUNT_WRITE, IFNULL(SUM_TIMER_WRITE, 'NULL') as SUM_TIMER_WRITE, IFNULL(MIN_TIMER_WRITE, 'NULL') as MIN_TIMER_WRITE, IFNULL(AVG_TIMER_WRITE, 'NULL') as AVG_TIMER_WRITE, IFNULL(MAX_TIMER_WRITE, 'NULL') as MAX_TIMER_WRITE, IFNULL(SUM_NUMBER_OF_BYTES_WRITE, 'NULL') as SUM_NUMBER_OF_BYTES_WRITE, IFNULL(COUNT_MISC, 'NULL') as COUNT_MISC, IFNULL(SUM_TIMER_MISC, 'NULL') as SUM_TIMER_MISC, IFNULL(MIN_TIMER_MISC, 'NULL') as MIN_TIMER_MISC, IFNULL(AVG_TIMER_MISC, 'NULL') as AVG_TIMER_MISC, IFNULL(MAX_TIMER_MISC, 'NULL') as MAX_TIMER_MISC FROM performance_schema.file_summary_by_instance") if 
err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) } else { for rows.Next() { err := rows.Scan(&performance_schema_file_summary_by_instance.FILE_NAME, &performance_schema_file_summary_by_instance.EVENT_NAME, &performance_schema_file_summary_by_instance.OBJECT_INSTANCE_BEGIN, &performance_schema_file_summary_by_instance.COUNT_STAR, &performance_schema_file_summary_by_instance.SUM_TIMER_WAIT, &performance_schema_file_summary_by_instance.MIN_TIMER_WAIT, &performance_schema_file_summary_by_instance.AVG_TIMER_WAIT, &performance_schema_file_summary_by_instance.MAX_TIMER_WAIT, &performance_schema_file_summary_by_instance.COUNT_READ, &performance_schema_file_summary_by_instance.SUM_TIMER_READ, &performance_schema_file_summary_by_instance.MIN_TIMER_READ, &performance_schema_file_summary_by_instance.AVG_TIMER_READ, &performance_schema_file_summary_by_instance.MAX_TIMER_READ, &performance_schema_file_summary_by_instance.SUM_NUMBER_OF_BYTES_READ, &performance_schema_file_summary_by_instance.COUNT_WRITE, &performance_schema_file_summary_by_instance.SUM_TIMER_WRITE, &performance_schema_file_summary_by_instance.MIN_TIMER_WRITE, &performance_schema_file_summary_by_instance.AVG_TIMER_WRITE, &performance_schema_file_summary_by_instance.MAX_TIMER_WRITE, &performance_schema_file_summary_by_instance.SUM_NUMBER_OF_BYTES_WRITE, &performance_schema_file_summary_by_instance.COUNT_MISC, &performance_schema_file_summary_by_instance.SUM_TIMER_MISC, &performance_schema_file_summary_by_instance.MIN_TIMER_MISC, &performance_schema_file_summary_by_instance.AVG_TIMER_MISC, &performance_schema_file_summary_by_instance.MAX_TIMER_MISC) if err != nil { - DbCollectQueries.logger.Error(err) + DbCollectQueriesOptimization.logger.Error(err) return err } index := MetricGroupValue{"FILE_NAME": performance_schema_file_summary_by_instance.FILE_NAME, "EVENT_NAME": performance_schema_file_summary_by_instance.EVENT_NAME, "OBJECT_INSTANCE_BEGIN": 
performance_schema_file_summary_by_instance.OBJECT_INSTANCE_BEGIN, "COUNT_STAR": performance_schema_file_summary_by_instance.COUNT_STAR, "SUM_TIMER_WAIT": performance_schema_file_summary_by_instance.SUM_TIMER_WAIT, "MIN_TIMER_WAIT": performance_schema_file_summary_by_instance.MIN_TIMER_WAIT, "AVG_TIMER_WAIT": performance_schema_file_summary_by_instance.AVG_TIMER_WAIT, "MAX_TIMER_WAIT": performance_schema_file_summary_by_instance.MAX_TIMER_WAIT, "COUNT_READ": performance_schema_file_summary_by_instance.COUNT_READ, "SUM_TIMER_READ": performance_schema_file_summary_by_instance.SUM_TIMER_READ, "MIN_TIMER_READ": performance_schema_file_summary_by_instance.MIN_TIMER_READ, "AVG_TIMER_READ": performance_schema_file_summary_by_instance.AVG_TIMER_READ, "MAX_TIMER_READ": performance_schema_file_summary_by_instance.MAX_TIMER_READ, "SUM_NUMBER_OF_BYTES_READ": performance_schema_file_summary_by_instance.SUM_NUMBER_OF_BYTES_READ, "COUNT_WRITE": performance_schema_file_summary_by_instance.COUNT_WRITE, "SUM_TIMER_WRITE": performance_schema_file_summary_by_instance.SUM_TIMER_WRITE, "MIN_TIMER_WRITE": performance_schema_file_summary_by_instance.MIN_TIMER_WRITE, "AVG_TIMER_WRITE": performance_schema_file_summary_by_instance.AVG_TIMER_WRITE, "MAX_TIMER_WRITE": performance_schema_file_summary_by_instance.MAX_TIMER_WRITE, "SUM_NUMBER_OF_BYTES_WRITE": performance_schema_file_summary_by_instance.SUM_NUMBER_OF_BYTES_WRITE, "COUNT_MISC": performance_schema_file_summary_by_instance.COUNT_MISC, "SUM_TIMER_MISC": performance_schema_file_summary_by_instance.SUM_TIMER_MISC, "MIN_TIMER_MISC": performance_schema_file_summary_by_instance.MIN_TIMER_MISC, "AVG_TIMER_MISC": performance_schema_file_summary_by_instance.AVG_TIMER_MISC, "MAX_TIMER_MISC": performance_schema_file_summary_by_instance.MAX_TIMER_MISC} @@ -315,8 +315,8 @@ func (DbCollectQueries *DbCollectQueries) GetMetrics(metrics *Metrics) error { metrics.DB.QueriesOptimization = QueriesOptimization - 
DbCollectQueries.logger.Debug("collectMetrics ", metrics.DB.Queries) - DbCollectQueries.logger.Debug("collectMetrics ", metrics.DB.QueriesOptimization) + DbCollectQueriesOptimization.logger.Debug("collectMetrics ", metrics.DB.Queries) + DbCollectQueriesOptimization.logger.Debug("collectMetrics ", metrics.DB.QueriesOptimization) return nil @@ -340,7 +340,17 @@ func CollectionExplain(digests map[string]MetricGroupValue, field_sorting string for _, p := range pairs { k := p[0].(string) - if digests[k]["schema_name"].(string) != "mysql" && digests[k]["schema_name"].(string) != "information_schema" && digests[k]["schema_name"].(string) != "performance_schema" && digests[k]["schema_name"].(string) != "NULL" && (strings.Contains(digests[k]["query_text"].(string), "SELECT") || strings.Contains(digests[k]["query_text"].(string), "select")) && digests[k]["explain"] == nil { + if digests[k]["schema_name"].(string) != "mysql" && digests[k]["schema_name"].(string) != "information_schema" && + digests[k]["schema_name"].(string) != "performance_schema" && digests[k]["schema_name"].(string) != "NULL" && + (strings.Contains(digests[k]["query_text"].(string), "SELECT") || strings.Contains(digests[k]["query_text"].(string), "select")) && + digests[k]["explain"] == nil { + + if (strings.Contains(digests[k]["query_text"].(string), "SELECT") || strings.Contains(digests[k]["query_text"].(string), "select")) && + strings.Contains(digests[k]["query_text"].(string), "SQL_NO_CACHE") && + !(strings.Contains(digests[k]["query_text"].(string), "WHERE") || strings.Contains(digests[k]["query_text"].(string), "where")) { + logger.Debug("Query From mysqldump", digests[k]["query_text"].(string)) + continue + } if strings.HasSuffix(digests[k]["query_text"].(string), "...") { digests[k]["explain"] = "need_full_query" diff --git a/metrics/runner.go b/metrics/runner.go index cd631d4..2ab9ba2 100644 --- a/metrics/runner.go +++ b/metrics/runner.go @@ -25,9 +25,9 @@ func makeTerminateChannel() <-chan 
os.Signal { return ch } -func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGatherer, repeaters map[string]MetricsRepeater, logger logging.Logger, - configuration *config.Config, configFile string, Mode Mode) { - var GenerateTimer, timer *time.Timer +func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGatherer, gatherers_query_optimization []MetricsGatherer, repeaters MetricsRepeater, logger logging.Logger, + configuration *config.Config, configFile string, Mode ModeT) { + var GenerateTimer, timer, QueryOptimizationTimer *time.Timer defer HandlePanic(configuration, logger) if logger == nil { if configuration.Debug { @@ -42,10 +42,13 @@ func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGat GenerateTimer = time.NewTimer(0 * time.Second) timer = time.NewTimer(3600 * time.Second) } else { - GenerateTimer = time.NewTimer(configuration.GenerateConfigSeconds * time.Second) + GenerateTimer = time.NewTimer(configuration.GenerateConfigPeriod * time.Second) timer = time.NewTimer(1 * time.Second) } - + QueryOptimizationTimer = time.NewTimer(10 * time.Second) + if !configuration.QueryOptimization { + QueryOptimizationTimer.Stop() + } terminator := makeTerminateChannel() for { select { @@ -54,13 +57,13 @@ func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGat os.Exit(0) case <-timer.C: logger.Println("Starting collection of data for saving a metrics...", timer) - timer.Reset(configuration.TimePeriodSeconds * time.Second) + timer.Reset(configuration.MetricsPeriod * time.Second) go func() { defer HandlePanic(configuration, logger) Ready = false metrics := collectMetrics(gatherers, logger, configuration) if Ready { - task := processRepeaters(metrics, repeaters["Metrics"], configuration, logger) + task := processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "Metrics", ModeType: ""}) if task == "Task" { logger.Println(" * A task has been found for the agent...") 
f := processTaskFunc(metrics, repeaters, gatherers, logger, configuration) @@ -72,19 +75,22 @@ func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGat logger.Println("End collection of metrics for saving a metrics...", timer) case <-GenerateTimer.C: logger.Println("Starting collection of data for generating a config...", GenerateTimer) - GenerateTimer.Reset(configuration.GenerateConfigSeconds * time.Second) + GenerateTimer.Reset(configuration.GenerateConfigPeriod * time.Second) go func() { + var metrics Metrics logger.Println(" * Collecting metrics to recommend a config...") defer HandlePanic(configuration, logger) Ready = false - metrics := collectMetrics(append(gatherers, gatherers_configuration...), logger, configuration) + if Mode.Name == "TaskSet" && Mode.ModeType == "queries_optimization" { + metrics = collectMetrics(append(gatherers, gatherers_query_optimization...), logger, configuration) + } else { + metrics = collectMetrics(append(gatherers, gatherers_configuration...), logger, configuration) + } if Ready { logger.Println(" * Sending metrics to Releem Cloud Platform...") - processRepeaters(metrics, repeaters[Mode.Name], configuration, logger) + processRepeaters(metrics, repeaters, configuration, logger, Mode) if Mode.Name == "Configurations" { - logger.Println("1. Recommended MySQL configuration downloaded to ", configuration.GetReleemConfDir()) - logger.Println("2. To check MySQL Performance Score please visit https://app.releem.com/dashboard?menu=metrics") - logger.Println("3. 
To apply the recommended configuration please read documentation https://app.releem.com/dashboard") +                        logger.Println("Recommended MySQL configuration downloaded to ", configuration.GetReleemConfDir()) 				} 			} 			if (Mode.Name == "Configurations" && Mode.ModeType != "default") || Mode.Name == "Event" || Mode.Name == "TaskSet" { @@ -94,23 +100,36 @@ func RunWorker(gatherers []MetricsGatherer, gatherers_configuration []MetricsGat 				logger.Println("Saved a config...") 			}() 			logger.Println("End collection of metrics for saving a metrics...", GenerateTimer) +		case <-QueryOptimizationTimer.C: +			logger.Println("Starting collection of data for queries optimization...", QueryOptimizationTimer) +			QueryOptimizationTimer.Reset(configuration.QueryOptimizationPeriod * time.Second) +			go func() { +				defer HandlePanic(configuration, logger) +				Ready = false +				logger.Println("QueryOptimization") +				metrics := collectMetrics(append(gatherers, gatherers_query_optimization...), logger, configuration) +				if Ready { +					processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "Metrics", ModeType: "QueryOptimization"}) +				} +				logger.Println("Saved a queries...") +			}() 		} 		logger.Info("LOOP") 	} } -func processTaskFunc(metrics Metrics, repeaters map[string]MetricsRepeater, gatherers []MetricsGatherer, logger logging.Logger, configuration *config.Config) func() { +func processTaskFunc(metrics Metrics, repeaters MetricsRepeater, gatherers []MetricsGatherer, logger logging.Logger, configuration *config.Config) func() { 	return func() { 		processTask(metrics, repeaters, gatherers, logger, configuration) 	} } -func processTask(metrics Metrics, repeaters map[string]MetricsRepeater, gatherers []MetricsGatherer, logger logging.Logger, configuration *config.Config) { +func processTask(metrics Metrics, repeaters MetricsRepeater, gatherers []MetricsGatherer, logger logging.Logger, configuration *config.Config) { 	defer HandlePanic(configuration, logger) 	output := make(MetricGroupValue) 	//metrics :=
collectMetrics(gatherers, logger) var task_output string - task := processRepeaters(metrics, repeaters["TaskGet"], configuration, logger) + task := processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "TaskGet", ModeType: ""}) if task.(Task).TaskTypeID == nil { return } @@ -125,7 +144,7 @@ func processTask(metrics Metrics, repeaters map[string]MetricsRepeater, gatherer output["task_output"] = "" metrics.ReleemAgent.Tasks = output - processRepeaters(metrics, repeaters["TaskStatus"], configuration, logger) + processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "TaskStatus", ModeType: ""}) logger.Println(" * Task with id -", TaskID, "and type id -", TaskTypeID, "is being started...") if TaskTypeID == 0 { @@ -161,7 +180,7 @@ func processTask(metrics Metrics, repeaters map[string]MetricsRepeater, gatherer output["task_exit_code"], output["task_status"], task_output = execCmd(configuration.ReleemDir+"/mysqlconfigurer.sh -u", []string{}, logger) output["task_output"] = output["task_output"].(string) + task_output } else if TaskTypeID == 3 { - output["task_exit_code"], output["task_status"], task_output = execCmd(configuration.ReleemDir+"/releem-agent --task=collect_queries", []string{}, logger) + output["task_exit_code"], output["task_status"], task_output = execCmd(configuration.ReleemDir+"/releem-agent --task=queries_optimization", []string{}, logger) output["task_output"] = output["task_output"].(string) + task_output } else if TaskTypeID == 4 { if configuration.InstanceType != "aws" { @@ -176,7 +195,7 @@ func processTask(metrics Metrics, repeaters map[string]MetricsRepeater, gatherer need_flush := false error_exist := false - recommend_var := processRepeaters(metrics, repeaters["GetConfigurationJson"], configuration, logger) + recommend_var := processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "Configurations", ModeType: "get-json"}) err := json.Unmarshal([]byte(recommend_var.(string)), &result_data) if err != 
nil { logger.Error(err) @@ -265,7 +284,7 @@ func processTask(metrics Metrics, repeaters map[string]MetricsRepeater, gatherer logger.Debug(output) logger.Println(" * Task with id -", TaskID, "and type id -", TaskTypeID, "completed with code", output["task_exit_code"]) metrics.ReleemAgent.Tasks = output - processRepeaters(metrics, repeaters["TaskStatus"], configuration, logger) + processRepeaters(metrics, repeaters, configuration, logger, ModeT{Name: "TaskStatus", ModeType: ""}) } @@ -297,10 +316,10 @@ func execCmd(cmd_path string, environment []string, logger logging.Logger) (int, return task_exit_code, task_status, task_output } func processRepeaters(metrics Metrics, repeaters MetricsRepeater, - configuration *config.Config, logger logging.Logger) interface{} { + configuration *config.Config, logger logging.Logger, Mode ModeT) interface{} { defer HandlePanic(configuration, logger) - result, err := repeaters.ProcessMetrics(configuration, metrics) + result, err := repeaters.ProcessMetrics(configuration, metrics, Mode) if err != nil { logger.PrintError("Repeater failed", err) } diff --git a/mysqlconfigurer.sh b/mysqlconfigurer.sh index 3dd4080..a9f967f 100755 --- a/mysqlconfigurer.sh +++ b/mysqlconfigurer.sh @@ -1,5 +1,5 @@ #!/bin/bash -# mysqlconfigurer.sh - Version 1.18.1 +# mysqlconfigurer.sh - Version 1.19.0 # (C) Releem, Inc 2022 # All rights reserved @@ -12,7 +12,7 @@ MYSQLTUNER_REPORT=$MYSQLCONFIGURER_PATH"mysqltunerreport.json" RELEEM_MYSQL_VERSION=$MYSQLCONFIGURER_PATH"mysql_version" MYSQLCONFIGURER_CONFIGFILE="${MYSQLCONFIGURER_PATH}${MYSQLCONFIGURER_FILE_NAME}" MYSQL_MEMORY_LIMIT=0 -VERSION="1.18.1" +VERSION="1.19.0" RELEEM_INSTALL_PATH=$MYSQLCONFIGURER_PATH"install.sh" logfile="releem-mysqlconfigurer.log" @@ -609,8 +609,8 @@ if test -f $RELEEM_CONF_FILE ; then else connection_string="${connection_string} --port=3306" fi - if [ ! -z "$collect_explain" ]; then - RELEEM_QUERY_OPTIMIZATION=$collect_explain + if [ ! 
-z "$query_optimization" ]; then +        RELEEM_QUERY_OPTIMIZATION=$query_optimization     fi fi diff --git a/releem.conf index 3985df6..657356b 100644 --- a/releem.conf +++ b/releem.conf @@ -17,18 +17,22 @@ hostname="" # Defaults to 0, Mysql memory usage limit. memory_limit=0 -# TimePeriodSeconds time.Duration `hcl:"interval_seconds"` +# MetricsPeriod time.Duration `hcl:"interval_seconds"` # Defaults to 30 seconds, how often metrics are collected. interval_seconds=60 -# ReadConfigSeconds time.Duration `hcl:"interval_read_config_seconds"` +# ReadConfigPeriod time.Duration `hcl:"interval_read_config_seconds"` # Defaults to 3600 seconds, how often to update the values from the config. interval_read_config_seconds=3600 -# GenerateConfigSeconds time.Duration `hcl:"interval_generate_config_seconds"` +# GenerateConfigPeriod time.Duration `hcl:"interval_generate_config_seconds"` # Defaults to 43200 seconds, how often to generate recommend the config. interval_generate_config_seconds=43200 +# QueryOptimizationPeriod time.Duration `hcl:"interval_query_optimization_seconds"` +# Defaults to 3600 seconds, how often query metrics are collected. +interval_query_optimization_seconds=3600 + # MysqlUser string`hcl:"mysql_user"` # Mysql user name for collection metrics. mysql_user="releem" @@ -56,3 +60,7 @@ mysql_cnf_dir="/etc/mysql/releem.conf.d" # ReleemConfDir string `hcl:"releem_cnf_dir"` # Defaults to 3600 seconds, Releem Agent configuration path. 
releem_cnf_dir="/opt/releem/conf" + +# QueryOptimization bool `hcl:"query_optimization"` +# Defaults to false, enables collecting EXPLAIN for queries. +query_optimization=false diff --git a/repeater/releemConfiguration.go index 0705e25..8868aa2 100644 --- a/repeater/releemConfiguration.go +++ b/repeater/releemConfiguration.go @@ -18,12 +18,11 @@ import ( type ReleemConfigurationsRepeater struct { 	logger        logging.Logger 	configuration *config.Config -	Mode          m.Mode } -func (repeater ReleemConfigurationsRepeater) ProcessMetrics(context m.MetricContext, metrics m.Metrics) (interface{}, error) { +func (repeater ReleemConfigurationsRepeater) ProcessMetrics(context m.MetricContext, metrics m.Metrics, Mode m.ModeT) (interface{}, error) { 	defer m.HandlePanic(repeater.configuration, repeater.logger) -	repeater.logger.Debug(repeater.Mode.Name, repeater.Mode.ModeType) +	repeater.logger.Debug(Mode.Name, Mode.ModeType) 	e, _ := json.Marshal(metrics) 	bodyReader := strings.NewReader(string(e)) 	repeater.logger.Debug("Result Send data: ", string(e)) @@ -40,31 +39,33 @@ func (repeater ReleemConfigurationsRepeater) ProcessMetrics(context m.MetricCont 		subdomain = "" 	} -	if repeater.Mode.Name == "TaskSet" && repeater.Mode.ModeType == "collect_queries" { +	if Mode.Name == "TaskSet" && Mode.ModeType == "queries_optimization" { +		api_domain = "https://api.queries." + subdomain + "releem.com/v2/" +	} else if Mode.Name == "Metrics" && Mode.ModeType == "QueryOptimization" { 		api_domain = "https://api.queries." + subdomain + "releem.com/v2/" 	} else { 		api_domain = "https://api."
+ subdomain + "releem.com/v2/" } - if repeater.Mode.Name == "Configurations" { - if repeater.Mode.ModeType == "set" { + if Mode.Name == "Configurations" { + if Mode.ModeType == "set" { api_domain = api_domain + "mysql" - } else if repeater.Mode.ModeType == "get" { + } else if Mode.ModeType == "get" { api_domain = api_domain + "config" - } else if repeater.Mode.ModeType == "get-json" { + } else if Mode.ModeType == "get-json" { api_domain = api_domain + "config?json=1" } else { api_domain = api_domain + "mysql" } - } else if repeater.Mode.Name == "Metrics" { + } else if Mode.Name == "Metrics" { api_domain = api_domain + "metrics" - } else if repeater.Mode.Name == "Event" { - api_domain = api_domain + "event/" + repeater.Mode.ModeType - } else if repeater.Mode.Name == "TaskGet" { + } else if Mode.Name == "Event" { + api_domain = api_domain + "event/" + Mode.ModeType + } else if Mode.Name == "TaskGet" { api_domain = api_domain + "task/task_get" - } else if repeater.Mode.Name == "TaskSet" { - api_domain = api_domain + "task/" + repeater.Mode.ModeType - } else if repeater.Mode.Name == "TaskStatus" { + } else if Mode.Name == "TaskSet" { + api_domain = api_domain + "task/" + Mode.ModeType + } else if Mode.Name == "TaskStatus" { api_domain = api_domain + "task/task_status" } repeater.logger.Debug(api_domain) @@ -98,7 +99,7 @@ func (repeater ReleemConfigurationsRepeater) ProcessMetrics(context m.MetricCont repeater.logger.Debug("Response: status code: ", res.StatusCode) repeater.logger.Debug("Response: body:\n", string(body_res)) - if repeater.Mode.Name == "Configurations" { + if Mode.Name == "Configurations" { err = os.WriteFile(context.GetReleemConfDir()+"/z_aiops_mysql.cnf", body_res, 0644) if err != nil { repeater.logger.Error("WriteFile: Error write to file: ", err) @@ -106,29 +107,29 @@ func (repeater ReleemConfigurationsRepeater) ProcessMetrics(context m.MetricCont } return string(body_res), err - } else if repeater.Mode.Name == "Metrics" { + } else if Mode.Name == 
"Metrics" { return string(body_res), err - } else if repeater.Mode.Name == "Event" { + } else if Mode.Name == "Event" { return nil, err - } else if repeater.Mode.Name == "TaskGet" { + } else if Mode.Name == "TaskGet" { result_data := m.Task{} err := json.Unmarshal(body_res, &result_data) return result_data, err - } else if repeater.Mode.Name == "TaskSet" { + } else if Mode.Name == "TaskSet" { return nil, err - } else if repeater.Mode.Name == "TaskStatus" { + } else if Mode.Name == "TaskStatus" { return nil, err } } return nil, err } -func NewReleemConfigurationsRepeater(configuration *config.Config, Mode m.Mode) ReleemConfigurationsRepeater { +func NewReleemConfigurationsRepeater(configuration *config.Config) ReleemConfigurationsRepeater { var logger logging.Logger if configuration.Debug { logger = logging.NewSimpleDebugLogger("ReleemRepeaterConfigurations") } else { logger = logging.NewSimpleLogger("ReleemRepeaterConfigurations") } - return ReleemConfigurationsRepeater{logger, configuration, Mode} + return ReleemConfigurationsRepeater{logger, configuration} }