diff --git a/Makefile b/Makefile index 4dc1be8..0170858 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,7 @@ -BEATNAME=sqlbeat -BEAT_DIR=github.com/adibendahan +BEAT_NAME=sqlbeat +BEAT_PATH=github.com/adibendahan/sqlbeat +BEAT_GOPATH=$(firstword $(subst :, ,${GOPATH})) +BEAT_URL=https://${BEAT_PATH} SYSTEM_TESTS=false TEST_ENVIRONMENT=false ES_BEATS=./vendor/github.com/elastic/beats @@ -9,30 +11,28 @@ PREFIX?=. # Path to the libbeat Makefile -include $(ES_BEATS)/libbeat/scripts/Makefile -.PHONY: init -init: - glide update --no-recursive +# Initial beat setup +.PHONY: setup +setup: copy-vendor make update - git init -.PHONY: commit -commit: - git add README.md CONTRIBUTING.md - git commit -m "Initial commit" - git add LICENSE - git commit -m "Add the LICENSE" - git add .gitignore .gitattributes - git commit -m "Add git settings" - git add . - git reset -- .travis.yml - git commit -m "Add sqlbeat" - git add .travis.yml - git commit -m "Add Travis CI" +# .PHONY: init +# init: +# glide update --no-recursive +# make update +# git init -.PHONY: update-deps -update-deps: - glide update --no-recursive +# Copy beats into vendor directory +.PHONY: copy-vendor +copy-vendor: + mkdir -p vendor/github.com/elastic/ + -cp -R ${BEAT_GOPATH}/src/github.com/elastic/beats vendor/github.com/elastic/ + rm -rf vendor/github.com/elastic/beats/.git # This is called by the beats packer before building starts .PHONY: before-build before-build: + +# Collects all dependencies and then calls update +.PHONY: collect +collect: diff --git a/_meta/beat.yml b/_meta/beat.yml new file mode 100644 index 0000000..0d6c2c8 --- /dev/null +++ b/_meta/beat.yml @@ -0,0 +1,46 @@ +################### Sqlbeat Configuration Example ######################### + +############################# Sqlbeat ###################################### + +sqlbeat: + # Defines how often an event is sent to the output + period: 10s + + # Defines the DB type you are connecting to, currently supporting 'mysql' / 'mssql' / 'postgres' + # This attribute is required -- there is no default value. + #dbtype: "mysql" + + # Defines the sql hostname that the beat will connect to + #hostname: "127.0.0.1" + + # Defines the sql port - leave commented for default ports + #port: "3306" + + # MAKE SURE THE USER ONLY HAS PERMISSIONS TO RUN THE DESIRED QUERIES AND NOTHING ELSE.
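+  # For example, a least-privilege MySQL setup might look like the following + # (the password, database, and table names here are illustrative, not defaults): + #   CREATE USER 'sqlbeat_user'@'%' IDENTIFIED BY 'change_me'; + #   GRANT SELECT ON mydb.tbl TO 'sqlbeat_user'@'%'; +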
+  # Defines the mysql user to use + #username: "sqlbeat_user" + + # Defines the mysql password to use - option #1 - plain text + #password: "sqlbeat_pass" + + # Defines the mysql password to use - option #2 - AES encryption (see github.com/adibendahan/mysqlbeat-password-encrypter) + #encryptedpassword: "2321f38819cf693951e88f00cd82" + + # Defines the database to connect to, optional for all except DB type postgres + #database: "sqlbeat" + + # Defines SSL mode for postgres + #postgressslmode: "disable" + + # Defines the queries that will run - the query below is an example + #queries: [ "select * from tbl"] + + # Defines the queries result types + # 'single-row' will be translated as columnname:value + # 'two-columns' will be translated as value-column1:value-column2 for each row + # 'multiple-rows' each row will be a document (with columnname:value) + # 'show-slave-delay' will only send the `Seconds_Behind_Master` column from SHOW SLAVE STATUS (for MySQL use) + #querytypes: ["multiple-rows"] + + # Columns that end with the following wild card will report only delta in seconds ((newval - oldval)/timediff.Seconds()) + #deltawildcard: "__DELTA" diff --git a/_meta/fields.yml b/_meta/fields.yml new file mode 100644 index 0000000..1963943 --- /dev/null +++ b/_meta/fields.yml @@ -0,0 +1,9 @@ +- key: sqlbeat + title: sqlbeat + description: + fields: + - name: counter + type: long + required: true + description: > + PLEASE UPDATE DOCUMENTATION diff --git a/_meta/kibana/index-pattern/sqlbeat.json b/_meta/kibana/index-pattern/sqlbeat.json new file mode 100644 index 0000000..11d6586 --- /dev/null +++ b/_meta/kibana/index-pattern/sqlbeat.json @@ -0,0 +1,6 @@ +{ + "fields": "[{\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.hostname\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.version\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"@timestamp\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"date\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"tags\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"fields\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.provider\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.machine_type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.availability_zone\", \"searchable\": true,
\"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.project_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.region\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"counter\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_id\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"_type\", \"searchable\": true, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_index\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_score\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"number\", \"scripted\": false}]", + "fieldFormatMap": "{\"@timestamp\": {\"id\": \"date\"}}", + "timeFieldName": "@timestamp", + "title": "sqlbeat-*" +} \ No newline at end of file diff --git a/beater/sqlbeat.go b/beater/sqlbeat.go index 539c5bc..ad75d50 100644 --- a/beater/sqlbeat.go +++ b/beater/sqlbeat.go @@ -11,11 +11,13 @@ import ( "strings" "time" - "github.com/adibendahan/sqlbeat/config" "github.com/elastic/beats/libbeat/beat" - "github.com/elastic/beats/libbeat/cfgfile" + // "github.com/elastic/beats/libbeat/cfgfile" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/publisher" + + "github.com/adibendahan/sqlbeat/config" // sql go drivers _ "github.com/denisenkom/go-mssqldb" @@ -25,20 +27,9 @@ import ( // Sqlbeat is a struct to hold the beat config & info type Sqlbeat struct { - beatConfig *config.Config done chan struct{} - period time.Duration - dbType string - hostname string - port string - username string - password string - passwordAES string - database string - postgresSSLMode string - queries []string - queryTypes []string - deltaWildcard string + config config.Config + client publisher.Client oldValues common.MapStr oldValuesAge common.MapStr @@ -60,15 +51,9 @@ const ( dbtMSSQL = "mssql" dbtPSQL = "postgres" - // default values - defaultPeriod = "10s" - defaultHostname = "127.0.0.1" defaultPortMySQL = "3306" defaultPortMSSQL = "1433" defaultPortPSQL = "5432" - defaultUsername = "sqlbeat_user" - defaultPassword = "sqlbeat_pass" - defaultDeltaWildcard = "__DELTA" // query types values queryTypeSingleRow = "single-row" @@ -86,31 +71,47 @@ const ( ) // New Creates beater -func New() *Sqlbeat { - return &Sqlbeat{ +func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) { + logp.Info(">>> New()") + + config := config.DefaultConfig + if err := cfg.Unpack(&config); err != nil { + return nil, fmt.Errorf("Error reading config file: %v", err) + } + + + logp.Info(" Config = \n%+v\n", config) + bt := &Sqlbeat{ done: make(chan struct{}), + config: config, + } + + if err := bt.Setup(b); err != nil { + return nil, 
fmt.Errorf("Error validating config file: %v", err) } + + return bt, nil } ///*** Beater interface methods ***/// // Config is a function to read config file -func (bt *Sqlbeat) Config(b *beat.Beat) error { +// func (bt *Sqlbeat) Config(b *beat.Beat) error { - // Load beater beatConfig - err := cfgfile.Read(&bt.beatConfig, "") - if err != nil { - return fmt.Errorf("Error reading config file: %v", err) - } +// // Load beater beatConfig +// err := cfgfile.Read(&bt.config, "") +// if err != nil { +// return fmt.Errorf("Error reading config file: %v", err) +// } - return nil -} +// return nil +// } -// Setup is a function to setup all beat config & info into the beat struct +// Setup is a function to validate func (bt *Sqlbeat) Setup(b *beat.Beat) error { - + logp.Info(">>> Setup()") // Config errors handling - switch bt.beatConfig.Sqlbeat.DBType { + switch bt.config.DBType { case dbtMSSQL, dbtMySQL, dbtPSQL: break default: @@ -118,118 +119,78 @@ func (bt *Sqlbeat) Setup(b *beat.Beat) error { return err } - if len(bt.beatConfig.Sqlbeat.Queries) < 1 { + if len(bt.config.Queries) < 1 { err := fmt.Errorf("There are no queries to execute") return err } - if len(bt.beatConfig.Sqlbeat.Queries) != len(bt.beatConfig.Sqlbeat.QueryTypes) { + if len(bt.config.Queries) != len(bt.config.QueryTypes) { err := fmt.Errorf("Config file error, queries != queryTypes array length (each query should have a corresponding type on the same index)") return err } - if bt.beatConfig.Sqlbeat.DBType == dbtPSQL { - if bt.beatConfig.Sqlbeat.Database == "" { + if bt.config.DBType == dbtPSQL { + if bt.config.Database == "" { err := fmt.Errorf("Database must be selected when using DB type postgres") return err } - if bt.beatConfig.Sqlbeat.PostgresSSLMode == "" { + if bt.config.PostgresSSLMode == "" { err := fmt.Errorf("PostgresSSLMode must be selected when using DB type postgres") return err } } - // Setting defaults for missing config - if bt.beatConfig.Sqlbeat.Period == "" { - logp.Info("Period not selected, proceeding with '%v' as default", defaultPeriod) - bt.beatConfig.Sqlbeat.Period = defaultPeriod - } - - if bt.beatConfig.Sqlbeat.Hostname == "" { - logp.Info("Hostname not selected, proceeding with '%v' as default", defaultHostname) - bt.beatConfig.Sqlbeat.Hostname = defaultHostname - } - - if bt.beatConfig.Sqlbeat.Port == "" { - switch bt.beatConfig.Sqlbeat.DBType { + if bt.config.Port == "" { + switch bt.config.DBType { case dbtMSSQL: - bt.beatConfig.Sqlbeat.Port = defaultPortMSSQL + bt.config.Port = defaultPortMSSQL case dbtMySQL: - bt.beatConfig.Sqlbeat.Port = defaultPortMySQL + bt.config.Port = defaultPortMySQL case dbtPSQL: - bt.beatConfig.Sqlbeat.Port = defaultPortPSQL + bt.config.Port = defaultPortPSQL } - logp.Info("Port not selected, proceeding with '%v' as default", bt.beatConfig.Sqlbeat.Port) + logp.Info("Port not selected, proceeding with '%v' as default", bt.config.Port) } - if bt.beatConfig.Sqlbeat.Username == "" { - logp.Info("Username not selected, proceeding with '%v' as default", defaultUsername) - bt.beatConfig.Sqlbeat.Username = defaultUsername - } - - if bt.beatConfig.Sqlbeat.Password == "" && bt.beatConfig.Sqlbeat.EncryptedPassword == "" { - logp.Info("Password not selected, proceeding with default password") - bt.beatConfig.Sqlbeat.Password = defaultPassword - } - - if bt.beatConfig.Sqlbeat.DeltaWildcard == "" { - logp.Info("DeltaWildcard not selected, proceeding with '%v' as default", defaultDeltaWildcard) - bt.beatConfig.Sqlbeat.DeltaWildcard = defaultDeltaWildcard - } - - // Parse the Period 
string - var durationParseError error - bt.period, durationParseError = time.ParseDuration(bt.beatConfig.Sqlbeat.Period) - if durationParseError != nil { - return durationParseError - } // Handle password decryption and save in the bt - if bt.beatConfig.Sqlbeat.Password != "" { - bt.password = bt.beatConfig.Sqlbeat.Password - } else if bt.beatConfig.Sqlbeat.EncryptedPassword != "" { + // if bt.config.Password != "" { + // bt.password = bt.config.Password + // } else + if bt.config.EncryptedPassword != "" { aesCipher, err := aes.NewCipher([]byte(secret)) if err != nil { return err } cfbDecrypter := cipher.NewCFBDecrypter(aesCipher, commonIV) - chiperText, err := hex.DecodeString(bt.beatConfig.Sqlbeat.EncryptedPassword) + chiperText, err := hex.DecodeString(bt.config.EncryptedPassword) if err != nil { return err } plaintextCopy := make([]byte, len(chiperText)) cfbDecrypter.XORKeyStream(plaintextCopy, chiperText) - bt.password = string(plaintextCopy) + bt.config.Password = string(plaintextCopy) } // init the oldValues and oldValuesAge array bt.oldValues = common.MapStr{"sqlbeat": "init"} bt.oldValuesAge = common.MapStr{"sqlbeat": "init"} - // Save config values to the bt - bt.dbType = bt.beatConfig.Sqlbeat.DBType - bt.hostname = bt.beatConfig.Sqlbeat.Hostname - bt.port = bt.beatConfig.Sqlbeat.Port - bt.username = bt.beatConfig.Sqlbeat.Username - bt.database = bt.beatConfig.Sqlbeat.Database - bt.postgresSSLMode = bt.beatConfig.Sqlbeat.PostgresSSLMode - bt.queries = bt.beatConfig.Sqlbeat.Queries - bt.queryTypes = bt.beatConfig.Sqlbeat.QueryTypes - bt.deltaWildcard = bt.beatConfig.Sqlbeat.DeltaWildcard - - logp.Info("Total # of queries to execute: %d", len(bt.queries)) - for index, queryStr := range bt.queries { - logp.Info("Query #%d (type: %s): %s", index+1, bt.queryTypes[index], queryStr) + logp.Info("Total # of queries to execute: %d", len(bt.config.Queries)) + for index, queryStr := range bt.config.Queries { + logp.Info("Query #%d (type: %s): %s", index+1, bt.config.QueryTypes[index], queryStr) } return nil } -// Run is a functions that runs the beat +// Run is a function that runs the beat func (bt *Sqlbeat) Run(b *beat.Beat) error { logp.Info("sqlbeat is running! 
Hit CTRL-C to stop it.") - ticker := time.NewTicker(bt.period) + bt.client = b.Publisher.Connect() + logp.Info("Connected; ticker period is %v", bt.config.Period) + ticker := time.NewTicker(bt.config.Period) for { select { case <-bt.done: @@ -244,13 +205,10 @@ func (bt *Sqlbeat) Run(b *beat.Beat) error { } } -// Cleanup is a function that does nothing on this beat :) -func (bt *Sqlbeat) Cleanup(b *beat.Beat) error { - return nil -} // Stop is a function that runs once the beat is stopped func (bt *Sqlbeat) Stop() { + bt.client.Close() close(bt.done) } @@ -261,21 +219,21 @@ func (bt *Sqlbeat) beat(b *beat.Beat) error { connString := "" - switch bt.dbType { + switch bt.config.DBType { case dbtMSSQL: connString = fmt.Sprintf("server=%v;user id=%v;password=%v;port=%v;database=%v", - bt.hostname, bt.username, bt.password, bt.port, bt.database) + bt.config.Hostname, bt.config.Username, bt.config.Password, bt.config.Port, bt.config.Database) case dbtMySQL: connString = fmt.Sprintf("%v:%v@tcp(%v:%v)/%v", - bt.username, bt.password, bt.hostname, bt.port, bt.database) + bt.config.Username, bt.config.Password, bt.config.Hostname, bt.config.Port, bt.config.Database) case dbtPSQL: connString = fmt.Sprintf("%v://%v:%v@%v:%v/%v?sslmode=%v", - dbtPSQL, bt.username, bt.password, bt.hostname, bt.port, bt.database, bt.postgresSSLMode) + dbtPSQL, bt.config.Username, bt.config.Password, bt.config.Hostname, bt.config.Port, bt.config.Database, bt.config.PostgresSSLMode) } - db, err := sql.Open(bt.dbType, connString) + db, err := sql.Open(bt.config.DBType, connString) if err != nil { return err } @@ -285,7 +243,7 @@ func (bt *Sqlbeat) beat(b *beat.Beat) error { var twoColumnEvent common.MapStr LoopQueries: - for index, queryStr := range bt.queries { + for index, queryStr := range bt.config.Queries { // Log the query run time and run the query dtNow := time.Now() rows, err := db.Query(queryStr) @@ -300,40 +258,42 @@ LoopQueries: } // Populate the two-columns event - if bt.queryTypes[index] == queryTypeTwoColumns { + if bt.config.QueryTypes[index] == queryTypeTwoColumns { twoColumnEvent = common.MapStr{ "@timestamp": common.Time(dtNow), - "type": bt.dbType, + "type": bt.config.DBType, } } LoopRows: for rows.Next() { - switch bt.queryTypes[index] { + switch bt.config.QueryTypes[index] { case queryTypeSingleRow, queryTypeSlaveDelay: // Generate an event from the current row - event, err := bt.generateEventFromRow(rows, columns, bt.queryTypes[index], dtNow) + event, err := bt.generateEventFromRow(rows, columns, bt.config.QueryTypes[index], dtNow) if err != nil { logp.Err("Query #%v error generating event from rows: %v", index, err) } else if event != nil { - b.Events.PublishEvent(event) - logp.Info("%v event sent", bt.queryTypes[index]) + // b.Events.PublishEvent(event) + bt.client.PublishEvent(event) + logp.Info("%v event sent", bt.config.QueryTypes[index]) } // breaking after the first row break LoopRows case queryTypeMultipleRows: // Generate an event from the current row - event, err := bt.generateEventFromRow(rows, columns, bt.queryTypes[index], dtNow) + event, err := bt.generateEventFromRow(rows, columns, bt.config.QueryTypes[index], dtNow) if err != nil { logp.Err("Query #%v error generating event from rows: %v", index, err) break LoopRows } else if event != nil { - b.Events.PublishEvent(event) - logp.Info("%v event sent", bt.queryTypes[index]) + // b.Events.PublishEvent(event) + bt.client.PublishEvent(event) + logp.Info("%v event sent", bt.config.QueryTypes[index]) } // Move to the next row @@ -354,8 +314,8 @@ 
LoopQueries: } // If the two-columns event has data, publish it - if bt.queryTypes[index] == queryTypeTwoColumns && len(twoColumnEvent) > 2 { - b.Events.PublishEvent(twoColumnEvent) + if bt.config.QueryTypes[index] == queryTypeTwoColumns && len(twoColumnEvent) > 2 { + bt.client.PublishEvent(twoColumnEvent) logp.Info("%v event sent", queryTypeTwoColumns) twoColumnEvent = nil } @@ -410,7 +370,7 @@ func (bt *Sqlbeat) appendRowToEvent(event common.MapStr, row *sql.Rows, columns } // If the column name ends with the deltaWildcard - if strings.HasSuffix(strColName, bt.deltaWildcard) { + if strings.HasSuffix(strColName, bt.config.DeltaWildcard) { var exists bool _, exists = bt.oldValues[strColName] @@ -503,7 +463,7 @@ func (bt *Sqlbeat) generateEventFromRow(row *sql.Rows, columns []string, queryTy // Create the event and populate it event := common.MapStr{ "@timestamp": common.Time(rowAge), - "type": bt.dbType, + "type": bt.config.DBType, } // Get RawBytes from data @@ -540,7 +500,7 @@ func (bt *Sqlbeat) generateEventFromRow(row *sql.Rows, columns []string, queryTy } // If query type is single row and the column name ends with the deltaWildcard - if queryType == queryTypeSingleRow && strings.HasSuffix(strColName, bt.deltaWildcard) { + if queryType == queryTypeSingleRow && strings.HasSuffix(strColName, bt.config.DeltaWildcard) { var exists bool _, exists = bt.oldValues[strColName] diff --git a/config/config.go b/config/config.go index 79b9b32..3c2b2ec 100644 --- a/config/config.go +++ b/config/config.go @@ -3,21 +3,30 @@ package config +import "time" + type Config struct { - Sqlbeat SqlbeatConfig + Period time.Duration `config:"period"` + DBType string `config:"dbtype"` + Hostname string `config:"hostname"` + Port string `config:"port"` + Username string `config:"username"` + Password string `config:"password"` + EncryptedPassword string `config:"encryptedpassword"` + Database string `config:"database"` + PostgresSSLMode string `config:"postgressslmode"` + Queries []string `config:"queries"` + QueryTypes []string `config:"querytypes"` + DeltaWildcard string `config:"deltawildcard"` } -type SqlbeatConfig struct { - Period string `yaml:"period"` - DBType string `yaml:"dbtype"` - Hostname string `yaml:"hostname"` - Port string `yaml:"port"` - Username string `yaml:"username"` - Password string `yaml:"password"` - EncryptedPassword string `yaml:"encryptedpassword"` - Database string `yaml:"database"` - PostgresSSLMode string `yaml:"postgressslmode"` - Queries []string `yaml:"queries"` - QueryTypes []string `yaml:"querytypes"` - DeltaWildcard string `yaml:"deltawildcard"` +var DefaultConfig = Config{ + Period: 10 * time.Second, + DBType: "", + Hostname: "127.0.0.1", + Username: "sqlbeat_user", + Password: "sqlbeat_pass", + Database: "", + PostgresSSLMode: "disable", + DeltaWildcard: "__DELTA", } diff --git a/docs/fields.asciidoc b/docs/fields.asciidoc index bf6b99d..6902e37 100644 --- a/docs/fields.asciidoc +++ b/docs/fields.asciidoc @@ -1,60 +1,139 @@ //// -This file is generated! See etc/fields.yml and scripts/generate_field_docs.py +This file is generated! See _meta/fields.yml and scripts/generate_field_docs.py //// [[exported-fields]] -== Exported Fields += Exported Fields +[partintro] + +-- This document describes the fields that are exported by Sqlbeat. They are grouped in the following categories: -* <<exported-fields-env>> -* <<exported-fields-sqlbeat>> +* <<exported-fields-beat>> +* <<exported-fields-cloud>> +* <<exported-fields-sqlbeat>> + +-- +[[exported-fields-beat]] +== Beat Fields + +Contains common beat fields available in all event types.
+ + + +[float] +=== beat.name + +The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. + + +[float] +=== beat.hostname + +The hostname as returned by the operating system on which the Beat is running. -[[exported-fields-env]] -=== Common Fields -Contains common fields available in all event types. +[float] +=== beat.version +The version of the beat that generated this event. -==== @timestamp +[float] +=== @timestamp type: date -example: 2015-01-24 14:06:05.071000 +example: August 26th 2016, 12:35:53.332 -format: YYYY-MM-DDTHH:MM:SS.milliZ +format: date required: True -The timestamp of when the measurements were taken. The precision is in milliseconds. The timezone is UTC. +The timestamp when the event log record was generated. -==== type +[float] +=== tags -required: True +Arbitrary tags that can be set per Beat and per transaction type. -PLEASE UPDATE DOCUMENTATION +[float] +=== fields -==== count +type: dict -type: int +Contains user configurable fields. -required: True -The number of transactions that this event represents. This is generally the inverse of the sampling rate. For example, for a sample rate of 1/10, the count is 10. The count is used by the UIs to return estimated values. Reserved for future usage. +[[exported-fields-cloud]] +== Cloud Provider Metadata Fields +Metadata from cloud providers added by the add_cloud_metadata processor. -==== beat.name -Name of the Beat sending the events. If the shipper name is set in the configuration file, then that value is used. If it is not set, the hostname is used. +[float] +=== meta.cloud.provider -==== beat.hostname +example: ec2 -The hostname as returned by the operating system on which the Beat is running. +Name of the cloud provider. Possible values are ec2, gce, or digitalocean. + + +[float] +=== meta.cloud.instance_id + +Instance ID of the host machine. + + +[float] +=== meta.cloud.machine_type + +example: t2.medium + +Machine type of the host machine. + + +[float] +=== meta.cloud.availability_zone + +example: us-east-1c + +Availability zone in which this host is running. + + +[float] +=== meta.cloud.project_id + +example: project-x + +Name of the project in Google Cloud. + + +[float] +=== meta.cloud.region + +Region in which this host is running. 
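+ +These `meta.cloud.*` fields are populated only when the optional `add_cloud_metadata` processor is enabled in the beat configuration, for example (a minimal sketch): + + processors: + - add_cloud_metadata: ~ +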
+ + +[[exported-fields-sqlbeat]] +== sqlbeat Fields + +None + + +[float] +=== counter + +type: long + +required: True + +PLEASE UPDATE DOCUMENTATION diff --git a/glide.lock b/glide.lock new file mode 100644 index 0000000..50618f9 --- /dev/null +++ b/glide.lock @@ -0,0 +1,141 @@ +hash: f07a47307563834c802ae0639116bc3cafb901efb66a48205e86460eff077694 +updated: 2017-08-16T10:08:59.739742805-04:00 +imports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/denisenkom/go-mssqldb + version: 8d4984e8baccbf5bfadd7f7e366fd61b7ccac38b +- name: github.com/eapache/go-resiliency + version: b86b1ec0dd4209a588dc1285cdd471e73525c0b3 + subpackages: + - breaker +- name: github.com/eapache/go-xerial-snappy + version: bb955e01b9346ac19dc29eb16586c90ded99a98c +- name: github.com/eapache/queue + version: ded5959c0d4e360646dc9e9908cff48666781367 +- name: github.com/elastic/beats + version: ffb035697fb3f8c0718aff81d68b7f9eddc91d09 + subpackages: + - libbeat/beat + - libbeat/cfgfile + - libbeat/common + - libbeat/common/dtfmt + - libbeat/common/file + - libbeat/common/fmtstr + - libbeat/common/jsontransform + - libbeat/common/match + - libbeat/common/op + - libbeat/common/streambuf + - libbeat/dashboards/dashboards + - libbeat/logp + - libbeat/monitoring + - libbeat/monitoring/adapter + - libbeat/outputs + - libbeat/outputs/codecs/format + - libbeat/outputs/codecs/json + - libbeat/outputs/console + - libbeat/outputs/elasticsearch + - libbeat/outputs/fileout + - libbeat/outputs/kafka + - libbeat/outputs/logstash + - libbeat/outputs/mode + - libbeat/outputs/mode/lb + - libbeat/outputs/mode/modeutil + - libbeat/outputs/mode/single + - libbeat/outputs/outil + - libbeat/outputs/redis + - libbeat/outputs/transport + - libbeat/paths + - libbeat/plugin + - libbeat/processors + - libbeat/processors/actions + - libbeat/processors/add_cloud_metadata + - libbeat/publisher + - libbeat/service + - metricbeat/schema + - metricbeat/schema/mapstriface +- name: github.com/elastic/go-lumber + version: 616041e345fc33c97bc0eb0fa6b388aa07bca3e1 + subpackages: + - client/v2 + - lj + - log + - protocol/v2 + - server/internal + - server/v2 +- name: github.com/elastic/go-ucfg + version: ec8488a52542c0c51e42e8ea204dcaff400bc644 + subpackages: + - cfgutil + - flag + - internal/parse + - json + - yaml +- name: github.com/garyburd/redigo + version: 8873b2f1995f59d4bcdd2b0dc9858e2cb9bf0c13 + subpackages: + - internal + - redis +- name: github.com/go-sql-driver/mysql + version: a0583e0143b1624142adab07e0e97fe106d99561 +- name: github.com/golang/snappy + version: d9eb7a3d35ec988b8585d4a0068e462c27d28380 +- name: github.com/joeshaw/multierror + version: 69b34d4ec901851247ae7e77d33909caf9df99ed +- name: github.com/klauspost/compress + version: 006acde2c5d283d2f8b8aa03d8f0cd2891c680cf + subpackages: + - flate + - zlib +- name: github.com/klauspost/cpuid + version: 09cded8978dc9e80714c4d85b0322337b0a1e5e0 +- name: github.com/klauspost/crc32 + version: 19b0b332c9e4516a6370a0456e6182c3b5036720 +- name: github.com/lib/pq + version: ee1442bda7bd1b6a84e913bdb421cb1874ec629d + subpackages: + - oid +- name: github.com/mitchellh/hashstructure + version: b098c52ef6beab8cd82bc4a32422cf54b890e8fa +- name: github.com/nranchev/go-libGeoIP + version: c78e8bd2dd3599feb21fd30886043979e82fe948 +- name: github.com/pierrec/lz4 + version: 5c9560bfa9ace2bf86080bf40d46b34ae44604df +- name: github.com/pierrec/xxHash + version: 5a004441f897722c627870a981d02b29924215fa + subpackages: + - 
xxHash32 +- name: github.com/pkg/errors + version: 01fa4104b9c248c8945d14d9f128454d5b28d595 +- name: github.com/rcrowley/go-metrics + version: ab2277b1c5d15c3cba104e9cbddbdfc622df5ad8 + subpackages: + - exp +- name: github.com/satori/go.uuid + version: 879c5887cd475cd7864858769793b2ceb0d44feb +- name: github.com/Shopify/sarama + version: f0996189c86dc27338468f2a9b10077c6d572b34 + repo: https://github.com/urso/sarama +- name: golang.org/x/crypto + version: 7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9 + subpackages: + - md4 +- name: golang.org/x/net + version: d58ca6618b994150e624f6888d871f4709db51a0 + subpackages: + - icmp + - ipv4 + - ipv6 + - proxy + - publicsuffix +- name: golang.org/x/sys + version: 62bee037599929a6e9146f29d10dd5208c43507d + subpackages: + - windows + - windows/svc + - windows/svc/debug +- name: gopkg.in/yaml.v2 + version: a83829b6f1293c91addabc89d0571c246397bbf4 +testImports: [] diff --git a/glide.yaml b/glide.yaml index 58a721b..361a71d 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,14 +1,17 @@ -package: . +package: github.com/adibendahan/sqlbeat import: - package: github.com/elastic/beats - version: d8ca37efa8888ce624e2cfaa90107e79fd41be1e + # This SHA refs a 5.0.0_SNAPSHOT -- let's use a semantic version instead + #version: d8ca37efa8888ce624e2cfaa90107e79fd41be1e + version: ~5.5.1 subpackages: - libbeat/beat - libbeat/cfgfile - libbeat/common - libbeat/logp - package: github.com/go-sql-driver/mysql - version: 1421caf44f6464fd2ee8de694c7508ee13f92964 + version: ~1.3.0 + #version: 1421caf44f6464fd2ee8de694c7508ee13f92964 - package: github.com/denisenkom/go-mssqldb version: 8d4984e8baccbf5bfadd7f7e366fd61b7ccac38b - package: github.com/lib/pq diff --git a/main.go b/main.go index bf658c7..64e1a32 100644 --- a/main.go +++ b/main.go @@ -9,7 +9,7 @@ import ( ) func main() { - err := beat.Run("sqlbeat", "", beater.New()) + err := beat.Run("sqlbeat", "", beater.New) if err != nil { os.Exit(1) } diff --git a/sqlbeat.full.yml b/sqlbeat.full.yml new file mode 100644 index 0000000..17178c8 --- /dev/null +++ b/sqlbeat.full.yml @@ -0,0 +1,686 @@ +################### Sqlbeat Configuration Example ######################### + +############################# Sqlbeat ###################################### + +sqlbeat: + # Defines how often an event is sent to the output + period: 10s + + # Defines the DB type you are connecting to, currently supporting 'mysql' / 'mssql' / 'postgres' + # This attribute is required -- there is no default value. + #dbtype: "mysql" + + # Defines the sql hostname that the beat will connect to + #hostname: "127.0.0.1" + + # Defines the sql port - leave commented for default ports + #port: "3306" + + # MAKE SURE THE USER ONLY HAS PERMISSIONS TO RUN THE DESIRED QUERIES AND NOTHING ELSE.
+  # Defines the mysql user to use + #username: "sqlbeat_user" + + # Defines the mysql password to use - option #1 - plain text + #password: "sqlbeat_pass" + + # Defines the mysql password to use - option #2 - AES encryption (see github.com/adibendahan/mysqlbeat-password-encrypter) + #encryptedpassword: "2321f38819cf693951e88f00cd82" + + # Defines the database to connect to, optional for all except DB type postgres + #database: "sqlbeat" + + # Defines SSL mode for postgres + #postgressslmode: "disable" + + # Defines the queries that will run - the query below is an example + #queries: [ "select * from tbl"] + + # Defines the queries result types + # 'single-row' will be translated as columnname:value + # 'two-columns' will be translated as value-column1:value-column2 for each row + # 'multiple-rows' each row will be a document (with columnname:value) + # 'show-slave-delay' will only send the `Seconds_Behind_Master` column from SHOW SLAVE STATUS (for MySQL use) + #querytypes: ["multiple-rows"] + + # Columns that end with the following wild card will report only delta in seconds ((newval - oldval)/timediff.Seconds()) + #deltawildcard: "__DELTA" + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# The internal queue size for bulk events in the processing pipeline. +# Do not modify this value. +#bulk_queue_size: 0 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 -> event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, and +# add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
+# +#processors: +#- add_cloud_metadata: +# + +#================================ Outputs ====================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify an additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "sqlbeat" plus date + # and generates [sqlbeat-]YYYY.MM.DD keys. + #index: "sqlbeat-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # The number of seconds to wait for new events between two bulk API index requests. + # If `bulk_max_size` is reached before this interval expires, additional bulk index + # requests are made. + #flush_interval: 1s + + # A template is used to set the mapping in Elasticsearch + # By default template loading is enabled and the template is loaded. + # These settings can be adjusted to load your own template or overwrite existing ones. + + # Set to false to disable template loading. + #template.enabled: true + + # Template name. By default the template name is sqlbeat. + #template.name: "sqlbeat" + + # Path to template file + #template.path: "${path.config}/sqlbeat.template.json" + + # Overwrite existing template + #template.overwrite: false + + # If set to true, sqlbeat checks the Elasticsearch version at connect time, and if it + # is 2.x, it loads the file specified by the template.versions.2x.path setting. The + # default is true. + #template.versions.2x.enabled: true + + # Path to the Elasticsearch 2.x version of the template file. + #template.versions.2x.path: "${path.config}/sqlbeat.template-es2x.json" + + # If set to true, sqlbeat checks the Elasticsearch version at connect time, and if it + # is 6.x, it loads the file specified by the template.versions.6x.path setting. The + # default is true. + #template.versions.6x.enabled: true + + # Path to the Elasticsearch 6.x version of the template file. + #template.versions.6x.path: "${path.config}/sqlbeat.template-es6x.json" + + # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + +#----------------------------- Logstash output --------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optionally load balance the events between the Logstash hosts + #loadbalance: true + + # Number of batches to be sent asynchronously to logstash while processing + # new batches. + #pipelining: 0 + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: 'sqlbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field.
To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty, the `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version sqlbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata contains the leader information + # used to decide which broker to publish to. + #metadata: + # Max metadata request retry attempts when the cluster is in the middle of a + # leader election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for the number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The number of seconds to wait for new events between two producer API calls. + #flush_interval: 1s + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
+ #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is sqlbeat. + #key: sqlbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. 
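+  # Example (all values here are illustrative): socks5://user:password@socks5-server:2233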
+ #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/sqlbeat" + + # Name of the generated files. The default is `sqlbeat` and it generates + # files: `sqlbeat`, `sqlbeat.1`, `sqlbeat.2`, etc. + #filename: sqlbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every sqlbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the sqlbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the sqlbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the sqlbeat installation. This is the default base path +# for all the files in which sqlbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. 
+#path.data: ${path.home}/data + +# The logs path for a sqlbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag. +#dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#dashboards.url: + +# The directory from where to read the dashboards. It is used instead of the URL +# when it has a value. +#dashboards.directory: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the URL when it has a value. +#dashboards.file: + +# If this option is enabled, the snapshot URL is used instead of the default URL. +#dashboards.snapshot: false + +# The URL from where to download the snapshot version of the dashboards. By default +# this has a value which is computed based on the Beat name and version. +#dashboards.snapshot_url: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#dashboards.beat: sqlbeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#dashboards.index: + +#================================ Logging ====================================== +# There are three options for the log output: syslog, file, stderr. +# Under Windows systems, the log files are by default sent to the file output, +# under all other systems by default to syslog. + +# Sets log level. The default log level is info. +# Available log levels are: critical, error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: true + +# If enabled, sqlbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/sqlbeat + + # The name of the files where the logs are written to.
+ #name: sqlbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + diff --git a/sqlbeat.template-es2x.json b/sqlbeat.template-es2x.json new file mode 100644 index 0000000..9eb7859 --- /dev/null +++ b/sqlbeat.template-es2x.json @@ -0,0 +1,102 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": { + "enabled": false + } + }, + "_meta": { + "version": "5.5.1" + }, + "date_detection": false, + "dynamic_templates": [ + { + "strings_as_keyword": { + "mapping": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "match_mapping_type": "string" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "name": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "version": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, + "counter": { + "type": "long" + }, + "meta": { + "properties": { + "cloud": { + "properties": { + "availability_zone": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "instance_id": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "machine_type": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "project_id": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "provider": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "region": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + } + } + }, + "tags": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "sqlbeat-*" +} \ No newline at end of file diff --git a/sqlbeat.template-es6x.json b/sqlbeat.template-es6x.json new file mode 100644 index 0000000..92d7207 --- /dev/null +++ b/sqlbeat.template-es6x.json @@ -0,0 +1,87 @@ +{ + "mappings": { + "_default_": { + "_meta": { + "version": "5.5.1" + }, + "date_detection": false, + "dynamic_templates": [ + { + "strings_as_keyword": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "counter": { + "type": "long" + }, + "meta": { + "properties": { + "cloud": { + "properties": { + "availability_zone": { + "ignore_above": 1024, + "type": "keyword" + }, + "instance_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "machine_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "project_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "provider": { + "ignore_above": 1024, + "type": "keyword" + }, + "region": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + }, + "order": 0, + "settings": { + "index.mapping.total_fields.limit": 10000, + "index.refresh_interval": "5s" + }, + "template": "sqlbeat-*" +} \ No newline at end of file diff --git 
diff --git a/sqlbeat.template.json b/sqlbeat.template.json
new file mode 100644
index 0000000..2f6de94
--- /dev/null
+++ b/sqlbeat.template.json
@@ -0,0 +1,90 @@
+{
+  "mappings": {
+    "_default_": {
+      "_all": {
+        "norms": false
+      },
+      "_meta": {
+        "version": "5.5.1"
+      },
+      "date_detection": false,
+      "dynamic_templates": [
+        {
+          "strings_as_keyword": {
+            "mapping": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            },
+            "match_mapping_type": "string"
+          }
+        }
+      ],
+      "properties": {
+        "@timestamp": {
+          "type": "date"
+        },
+        "beat": {
+          "properties": {
+            "hostname": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            },
+            "name": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            },
+            "version": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            }
+          }
+        },
+        "counter": {
+          "type": "long"
+        },
+        "meta": {
+          "properties": {
+            "cloud": {
+              "properties": {
+                "availability_zone": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                },
+                "instance_id": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                },
+                "machine_type": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                },
+                "project_id": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                },
+                "provider": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                },
+                "region": {
+                  "ignore_above": 1024,
+                  "type": "keyword"
+                }
+              }
+            }
+          }
+        },
+        "tags": {
+          "ignore_above": 1024,
+          "type": "keyword"
+        }
+      }
+    }
+  },
+  "order": 0,
+  "settings": {
+    "index.mapping.total_fields.limit": 10000,
+    "index.refresh_interval": "5s"
+  },
+  "template": "sqlbeat-*"
+}
\ No newline at end of file
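Note: the sqlbeat.yml diff below keeps the dbtype setting ('mysql' / 'mssql' / 'postgres'). A rough sketch of how such a setting can map onto Go's database/sql; the specific driver packages and DSN shapes are assumptions based on the usual community drivers, not confirmed from sqlbeat's source:

// dsn_sketch.go - hypothetical dbtype-to-driver mapping.
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/denisenkom/go-mssqldb" // dbtype: mssql
	_ "github.com/go-sql-driver/mysql"   // dbtype: mysql
	_ "github.com/lib/pq"                // dbtype: postgres
)

// dsn builds a driver name and connection string per dbtype, following
// each driver's documented DSN format.
func dsn(dbtype, user, pass, host, port, database string) (driver, conn string, err error) {
	switch dbtype {
	case "mysql":
		return "mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", user, pass, host, port, database), nil
	case "mssql":
		return "mssql", fmt.Sprintf("server=%s;user id=%s;password=%s;port=%s", host, user, pass, port), nil
	case "postgres":
		return "postgres", fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", host, port, user, pass, database), nil
	}
	return "", "", fmt.Errorf("unsupported dbtype %q", dbtype)
}

func main() {
	driver, conn, err := dsn("mysql", "sqlbeat_user", "sqlbeat_pass", "127.0.0.1", "3306", "sqlbeat")
	if err != nil {
		panic(err)
	}
	db, err := sql.Open(driver, conn) // validates lazily; Ping to actually connect
	if err != nil {
		panic(err)
	}
	defer db.Close()
}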
diff --git a/sqlbeat.yml b/sqlbeat.yml
index c36edce..9db30a9 100644
--- a/sqlbeat.yml
+++ b/sqlbeat.yml
@@ -4,9 +4,10 @@ sqlbeat:
 
   # Defines how often an event is sent to the output
-  #period: 10s
+  period: 10s
 
   # Defines the DB type you are connecting to, currently supporting 'mysql' / 'mssql' / 'postgres'
+  # This attribute is required -- there is no default value.
   #dbtype: "mysql"
 
   # Defines the sql hostname that the beat will connect to
@@ -44,278 +45,58 @@ sqlbeat:
   # Columns that end with the following wildcard will report only the delta in seconds ((newval - oldval)/timediff.Seconds())
   #deltawildcard: "__DELTA"
 
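Note: a worked illustration of the deltawildcard semantics above, where a column whose name ends in __DELTA reports (newval - oldval)/timediff.Seconds() instead of the raw value. Names here are illustrative, not sqlbeat's actual code:

// delta_sketch.go - hypothetical illustration of the __DELTA column semantics.
package main

import (
	"fmt"
	"strings"
	"time"
)

type sample struct {
	value float64
	when  time.Time
}

var lastValues = map[string]sample{} // previous reading per column

// deltaPerSecond returns the per-second rate for a __DELTA column, and
// false on the first reading (no previous value to diff against).
func deltaPerSecond(column string, newVal float64, now time.Time) (float64, bool) {
	if !strings.HasSuffix(column, "__DELTA") {
		return newVal, true // plain column: report as-is
	}
	old, seen := lastValues[column]
	lastValues[column] = sample{newVal, now}
	if !seen {
		return 0, false
	}
	return (newVal - old.value) / now.Sub(old.when).Seconds(), true
}

func main() {
	t0 := time.Now()
	deltaPerSecond("bytes_sent__DELTA", 1000, t0)
	rate, ok := deltaPerSecond("bytes_sent__DELTA", 4000, t0.Add(10*time.Second))
	fmt.Println(rate, ok) // 300 true: 3000 bytes over 10s
}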
-###############################################################################
-############################# Libbeat Config ##################################
-# Base config file used by all other beats for using libbeat features
+#================================ General =====================================
 
-############################# Output ##########################################
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
 
-# Configure what outputs to use when sending the data collected by the beat.
-# Multiple outputs may be used.
-output:
-
-  ### Elasticsearch as output
-  elasticsearch:
-    # Array of hosts to connect to.
-    # Scheme and port can be left out and will be set to the default (http and 9200)
-    # In case you specify and additional path, the scheme is required: http://localhost:9200/path
-    # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
-    #hosts: ["127.0.0.1:9200"]
-
-    # Optional protocol and basic auth credentials.
-    #protocol: "https"
-    #username: "admin"
-    #password: "s3cr3t"
-
-    # Dictionary of HTTP parameters to pass within the url with index operations.
-    #parameters:
-      #param1: value1
-      #param2: value2
-
-    # Number of workers per Elasticsearch host.
-    #worker: 1
-
-    # Optional index name. The default is "sqlbeat" and generates
-    # [sqlbeat-]YYYY.MM.DD keys.
-    #index: "sqlbeat"
-
-    # A template is used to set the mapping in Elasticsearch
-    # By default template loading is disabled and no template is loaded.
-    # These settings can be adjusted to load your own template or overwrite existing ones
-    #template:
-
-      # Template name. By default the template name is sqlbeat.
-      #name: "sqlbeat"
-
-      # Path to template file
-      #path: "sqlbeat.template.json"
-
-      # Overwrite existing template
-      #overwrite: false
-
-    # Optional HTTP Path
-    #path: "/elasticsearch"
-
-    # Proxy server url
-    #proxy_url: http://proxy:3128
-
-    # The number of times a particular Elasticsearch index operation is attempted. If
-    # the indexing operation doesn't succeed after this many retries, the events are
-    # dropped. The default is 3.
-    #max_retries: 3
-
-    # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
-    # The default is 50.
-    bulk_max_size: 50
-
-    # Configure http request timeout before failing an request to Elasticsearch.
-    #timeout: 90
-
-    # The number of seconds to wait for new events between two bulk API index requests.
-    # If `bulk_max_size` is reached before this interval expires, addition bulk index
-    # requests are made.
-    #flush_interval: 1
-
-    # Boolean that sets if the topology is kept in Elasticsearch. The default is
-    # false. This option makes sense only for Packetbeat.
-    #save_topology: false
-
-    # The time to live in seconds for the topology information that is stored in
-    # Elasticsearch. The default is 15 seconds.
-    #topology_expire: 15
-
-    # tls configuration. By default is off.
-    #tls:
-      # List of root certificates for HTTPS server verifications
-      #certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-      # Certificate for TLS client authentication
-      #certificate: "/etc/pki/client/cert.pem"
-
-      # Client Certificate Key
-      #certificate_key: "/etc/pki/client/cert.key"
-
-      # Controls whether the client verifies server certificates and host name.
-      # If insecure is set to true, all server host names and certificates will be
-      # accepted. In this mode TLS based connections are susceptible to
-      # man-in-the-middle attacks. Use only for testing.
-      #insecure: true
-
-      # Configure cipher suites to be used for TLS connections
-      #cipher_suites: []
-
-      # Configure curve types for ECDHE based cipher suites
-      #curve_types: []
-
-      # Configure minimum TLS version allowed for connection to logstash
-      #min_version: 1.0
-
-      # Configure maximum TLS version allowed for connection to logstash
-      #max_version: 1.2
-
-
-  ### Logstash as output
-  #logstash:
-    # The Logstash hosts
-    #hosts: ["localhost:5044"]
-
-    # Number of workers per Logstash host.
-    #worker: 1
-
-    # Set gzip compression level.
-    #compression_level: 3
-
-    # Optional load balance the events between the Logstash hosts
-    #loadbalance: true
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
 
-    # Optional index name. The default index name is set to name of the beat
-    # in all lowercase.
-    #index: sqlbeat
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
 
-    # SOCKS5 proxy server URL
-    #proxy_url: socks5://user:password@socks5-server:2233
+#================================ Outputs =====================================
 
-    # Resolve names locally when using a proxy server. Defaults to false.
-    #proxy_use_local_resolver: false
-
-    # Optional TLS. By default is off.
-    #tls:
-      # List of root certificates for HTTPS server verifications
-      #certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-      # Certificate for TLS client authentication
-      #certificate: "/etc/pki/client/cert.pem"
-
-      # Client Certificate Key
-      #certificate_key: "/etc/pki/client/cert.key"
-
-      # Controls whether the client verifies server certificates and host name.
-      # If insecure is set to true, all server host names and certificates will be
-      # accepted. In this mode TLS based connections are susceptible to
-      # man-in-the-middle attacks. Use only for testing.
-      #insecure: true
-
-      # Configure cipher suites to be used for TLS connections
-      #cipher_suites: []
-
-      # Configure curve types for ECDHE based cipher suites
-      #curve_types: []
-
-
-  ### File as output
-  #file:
-    # Path to the directory where to save the generated files. The option is mandatory.
-    #path: "/tmp/sqlbeat"
-
-    # Name of the generated files. The default is `sqlbeat` and it generates files: `sqlbeat`, `sqlbeat.1`, `sqlbeat.2`, etc.
-    #filename: sqlbeat
-
-    # Maximum size in kilobytes of each file. When this size is reached, the files are
-    # rotated. The default value is 10240 kB.
-    #rotate_every_kb: 10000
-
-    # Maximum number of files under path. When this number of files is reached, the
-    # oldest file is deleted and the rest are shifted from last to first. The default
-    # is 7 files.
-    #number_of_files: 7
-
-
-  ### Console output
-  # console:
-    # Pretty print json event
-    #pretty: false
-
-
-############################# Shipper #########################################
-
-shipper:
-  # The name of the shipper that publishes the network data. It can be used to group
-  # all the transactions sent by a single shipper in the web interface.
-  # If this options is not defined, the hostname is used.
-  #name:
-
-  # The tags of the shipper are included in their own field with each
-  # transaction published. Tags make it easy to group servers by different
-  # logical properties.
-  #tags: ["service-X", "web-tier"]
-
-  # Optional fields that you can specify to add additional information to the
-  # output. Fields can be scalar values, arrays, dictionaries, or any nested
-  # combination of these.
-  #fields:
-  #  env: staging
-
-  # If this option is set to true, the custom fields are stored as top-level
-  # fields in the output document instead of being grouped under a fields
-  # sub-dictionary. Default is false.
-  #fields_under_root: false
-
-  # Uncomment the following if you want to ignore transactions created
-  # by the server on which the shipper is installed. This option is useful
-  # to remove duplicates if shippers are installed on multiple servers.
-  #ignore_outgoing: true
-
-  # How often (in seconds) shippers are publishing their IPs to the topology map.
-  # The default is 10 seconds.
-  #refresh_topology_freq: 10
-
-  # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
-  # All the IPs will be deleted afterwards. Note, that the value must be higher than
-  # refresh_topology_freq. The default is 15 seconds.
-  #topology_expire: 15
-
-  # Internal queue size for single events in processing pipeline
-  #queue_size: 1000
-
-  # Sets the maximum number of CPUs that can be executing simultaneously. The
-  # default is the number of logical CPUs available in the system.
-  #max_procs:
-
-  # Configure local GeoIP database support.
-  # If no paths are not configured geoip is disabled.
-  #geoip:
-    #paths:
-    #  - "/usr/share/GeoIP/GeoLiteCity.dat"
-    #  - "/usr/local/var/GeoIP/GeoLiteCity.dat"
-
-
-############################# Logging #########################################
-
-# There are three options for the log output: syslog, file, stderr.
-# Under Windows systems, the log files are per default sent to the file output,
-# under all other system per default to syslog.
-logging:
-
-  # Send all logging output to syslog. On Windows default is false, otherwise
-  # default is true.
-  #to_syslog: true
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
 
-  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
-  # limit is reached.
-  #to_files: false
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["localhost:9200"]
 
-  # To enable logging to files, to_files option has to be set to true
-  #files:
-    # The directory where the log files will written to.
-    #path: /var/log/sqlbeat
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
 
-  # The name of the files where the logs are written to.
-  #name: sqlbeat
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
 
-  # Configure log file size limit. If limit is reached, log file will be
-  # automatically rotated
-  #rotateeverybytes: 10485760 # = 10MB
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
 
-  # Number of rotated log files to keep. Oldest files will be deleted first.
-  #keepfiles: 7
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
 
-  # Enable debug output for selected components. To enable all selectors use ["*"]
-  # Other available selectors are beat, publish, service
-  # Multiple selectors can be chained.
-  #selectors: [ ]
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
 
-  # Sets log level. The default log level is error.
-  # Available log levels are: critical, error, warning, info, debug
-  #level: error
+#================================ Logging =====================================
+# Sets log level. The default log level is info.
+# Available log levels are: critical, error, warning, info, debug
+#logging.level: debug
 
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
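Note: for context on what the Elasticsearch and Logstash outputs above receive, each query result row becomes an event document sent on every period. A hedged sketch using libbeat 5.x's common.MapStr; rowToEvent is a hypothetical helper and the field names are illustrative, not sqlbeat's actual code:

// event_sketch.go - hypothetical mapping of a result row to an event.
package main

import (
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/common"
)

// rowToEvent turns one result row into an event document with one field
// per column.
func rowToEvent(columns []string, values []interface{}) common.MapStr {
	event := common.MapStr{
		"@timestamp": common.Time(time.Now()),
		"type":       "sqlbeat",
	}
	for i, col := range columns {
		event[col] = values[i]
	}
	return event
}

func main() {
	fmt.Println(rowToEvent([]string{"threads_connected"}, []interface{}{int64(42)}))
}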