Tyk Pump is a pluggable analytics purger that moves the analytics generated by your Tyk nodes to any back-end. The following back-ends are supported:
- MongoDB (to replace built-in purging)
- CSV (updated, now supports all fields)
- ElasticSearch (2.0+)
- Graylog
- InfluxDB
- Moesif
- Splunk
- StatsD
- DogStatsD
- Hybrid (Tyk RPC)
- Prometheus
- Logz.io
- Kafka
- Stdout (i.e. for use by Datadog logging agent in Kubernetes)
Create a pump.conf file:
{
"analytics_storage_type": "redis",
"analytics_storage_config": {
"type": "redis",
"host": "localhost",
"port": 6379,
"hosts": null,
"username": "",
"password": "",
"database": 0,
"optimisation_max_idle": 100,
"optimisation_max_active": 0,
"enable_cluster": false,
"redis_use_ssl": false,
"redis_ssl_insecure_skip_verify": false
},
"log_level":"info",
"log_format":"text",
"purge_delay": 1,
"health_check_endpoint_name": "hello",
"health_check_endpoint_port": 8083,
"pumps": {
"dummy": {
"type": "dummy",
"meta": {}
},
"mongo": {
"type": "mongo",
"meta": {
"collection_name": "tyk_analytics",
"mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}"
}
},
"mongo-pump-aggregate": {
"type": "mongo-pump-aggregate",
"meta": {
"mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}",
"use_mixed_collection": true,
"store_analytics_per_minute": false,
"track_all_paths": false
}
},
"csv": {
"type": "csv",
"meta": {
"csv_dir": "./"
}
},
"sql_aggregate": {
"name": "sql_aggregate",
"meta": {
"type": "postgres",
"connection_string": "host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw",
"table_sharding": true
}
},
"sql": {
"name": "sql",
"meta": {
"type": "postgres",
"connection_string": "host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw",
"table_sharding": false
}
},
"elasticsearch": {
"type": "elasticsearch",
"meta": {
"index_name": "tyk_analytics",
"elasticsearch_url": "http://localhost:9200",
"enable_sniffing": false,
"document_type": "tyk_analytics",
"rolling_index": false,
"extended_stats": false,
"version": "5",
"bulk_config":{
"workers": 2,
"flush_interval": 60
}
}
},
"influx": {
"type": "influx",
"meta": {
"database_name": "tyk_analytics",
"address": "http//localhost:8086",
"username": "root",
"password": "root",
"fields": [
"request_time"
],
"tags": [
"path",
"response_code",
"api_key",
"api_version",
"api_name",
"api_id",
"raw_request",
"ip_address",
"org_id",
"oauth_id"
]
}
},
"moesif": {
"type": "moesif",
"meta": {
"application_id": ""
}
},
"splunk": {
"type": "splunk",
"meta": {
"collector_token": "<token>",
"collector_url": "<url>",
"ssl_insecure_skip_verify": false,
"ssl_cert_file": "<cert-path>",
"ssl_key_file": "<key-path>",
"ssl_server_name": "<server-name>"
"enable_batch":true,
"batch_max_content_length":<max_content_length>
}
},
"statsd": {
"type": "statsd",
"meta": {
"address": "localhost:8125",
"fields": [
"request_time"
],
"tags": [
"path",
"response_code",
"api_key",
"api_version",
"api_name",
"api_id",
"raw_request",
"ip_address",
"org_id",
"oauth_id"
]
}
},
"dogstatsd": {
"type": "dogstatsd",
"meta": {
"address": "localhost:8125",
"namespace": "pump",
"async_uds": true,
"async_uds_write_timeout_seconds": 2,
"buffered": true,
"buffered_max_messages": 32,
"tags": [
"method",
"response_code",
"api_version",
"api_name",
"api_id",
"org_id",
"tracked",
"path",
"oauth_id"
]
}
},
"prometheus": {
"type": "prometheus",
"meta": {
"listen_address": "localhost:9090",
"path": "/metrics"
}
},
"graylog": {
"type": "graylog",
"meta": {
"host": "10.60.6.15",
"port": 12216,
"tags": [
"method",
"path",
"response_code",
"api_key",
"api_version",
"api_name",
"api_id",
"org_id",
"oauth_id",
"raw_request",
"request_time",
"raw_response",
"ip_address"
]
}
},
"hybrid": {
"type": "hybrid",
"meta": {
"rpc_key": "5b5fd341e6355b5eb194765e",
"api_key": "008d6d1525104ae77240f687bb866974",
"connection_string": "localhost:9090",
"aggregated": false,
"use_ssl": false,
"ssl_insecure_skip_verify": false,
"group_id": "",
"call_timeout": 30,
"ping_timeout": 60,
"rpc_pool_size": 30
}
},
"logzio": {
"type": "logzio",
"meta": {
"token": "<YOUR-LOGZ.IO-TOKEN>"
}
},
"kafka": {
"type": "kafka",
"meta": {
"broker": [
"localhost:9092"
],
"topic": "tyk-pump",
"use_ssl": true,
"ssl_insecure_skip_verify": false,
"client_id": "tyk-pump",
"timeout": 60,
"compressed": true,
"meta_data": {
"key": "value"
}
}
},
"syslog": {
"name": "syslog",
"meta": {
"transport": "udp",
"network_addr": "localhost:5140",
"log_level": 6,
"tag":"syslog-pump"
}
},
"stdout":{
"type": "stdout",
"meta" : {
"log_field_name": "tyk-analytics-record",
"format": "json"
}
}
},
"uptime_pump_config": {
"collection_name": "tyk_uptime_analytics",
"mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}"
},
"dont_purge_uptime_data": false,
"omit_detailed_recording": false,
"max_record_size": 1000
}
Settings for Redis and MongoDB are the same as in the original tyk.conf.
purge_delay - The number of seconds the Pump waits between checking Redis for analytics data and purging it.
purge_chunk - The maximum number of records to pull from Redis at a time. If unset or 0, all the analytics records in Redis are pulled. If set, storage_expiration_time is used to reset the analytics record TTL.
storage_expiration_time - The number of seconds for the analytics record TTL. It only applies when purge_chunk is enabled. Defaults to 60 seconds.
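As a sketch, these three top-level options sit alongside the rest of pump.conf; the values below are illustrative only:
{
  "purge_delay": 2,
  "purge_chunk": 1000,
  "storage_expiration_time": 60
}
With this configuration, the Pump pulls at most 1000 records per purge loop and the analytics record TTL is reset to 60 seconds.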
log_level - Set the logging level for tyk-pump. The possible values are: info, debug, error and warn. By default, the log level is info.
log_format - Set the logger format. The possible values are: text and json. By default, the log format is text.
This feature adds a new configuration field to each pump, called filters. Its structure is the following:
"filters":{
"api_ids":[],
"org_ids":[],
"response_codes":[],
"skip_api_ids":[],
"skip_org_ids":[],
"skip_response_codes":[]
}
The fields api_ids, org_ids and response_codes work as an allow list (APIs, orgs and response codes for which we want to send the analytics records), and the fields skip_api_ids, skip_org_ids and skip_response_codes work as a block list.
Block list configurations always take priority over allow list configurations.
An example of configuration would be:
"csv": {
"type": "csv",
"filters": {
"org_ids": ["org1","org2"]
},
"meta": {
"csv_dir": "./bar"
}
}
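To illustrate how block lists take priority over allow lists, a sketch (values illustrative) that keeps records from two orgs but still drops 404 and 500 responses would be:
"csv": {
  "type": "csv",
  "filters": {
    "org_ids": ["org1","org2"],
    "skip_response_codes": [404, 500]
  },
  "meta": {
    "csv_dir": "./bar"
  }
}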
You can configure a different timeout for each pump with the timeout configuration option. Its default value is 0 seconds, which means the pump will wait for the write operation indefinitely.
An example of this configuration would be:
"mongo": {
"type": "mongo",
"timeout":5,
"meta": {
"collection_name": "tyk_analytics",
"mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}"
}
}
If a pump doesn't have a configured timeout and it takes longer to write than the purge loop interval set in the purge_delay config option, you will see the following warning message: Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try to set a timeout for this pump.
If you have configured a timeout but the pump still takes longer to write than the purge loop interval set in the purge_delay config option, you will see the following warning message: Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try lowering the timeout configured for this pump.
Environment variables can be used to override the settings defined in the configuration file. See Environment Variables in our docs for details. Where an environment variable is specified, its value will take precedence over the value in the configuration file.
"analytics_storage_config": {
"type": "redis",
"host": "localhost",
"port": 6379,
"hosts": null,
"username": "",
"password": "",
"database": 0,
"optimisation_max_idle": 100,
"optimisation_max_active": 0,
"enable_cluster": false,
"redis_use_ssl": false,
"redis_ssl_insecure_skip_verify": false
},
redis_use_ssl - Set this to true to use SSL when connecting to Redis.
redis_ssl_insecure_skip_verify - Set this to true to tell Pump to skip validation of Redis' certificate.
dont_purge_uptime_data - Setting this to false will create a pump that pushes uptime data to the Uptime Pump, so the Dashboard can read it. Disable by setting it to true.
In uptime_pump_config you can configure a mongo uptime pump. By default, the uptime pump is of mongo type, so it's not necessary to specify it here.
The minimum required configurations for uptime pumps are:
collection_name - Determines the uptime collection name in mongo. By default, tyk_uptime_analytics.
mongo_url - The uptime pump mongo connection URL. It is usually something like "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}".
Supported in Tyk Pump v1.5.0+
In uptime_pump_config you can configure a SQL uptime pump. To do that, you need to add the field uptime_type with the value sql.
You can also use different types of SQL uptime pumps, like postgres or sqlite, using the type field.
An example of a SQL Postgres uptime pump would be:
"uptime_pump_config": {
"uptime_type": "sql",
"type": "postgres",
"connection_string": "host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw",
"table_sharding": false
},
Note that you can also set the log_level field in uptime_pump_config to debug, info or warning. By default, the SQL logger verbosity is silent.
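For example, a sketch of the same SQL uptime pump with the logger raised to debug (connection values are placeholders) would be:
"uptime_pump_config": {
  "uptime_type": "sql",
  "type": "postgres",
  "connection_string": "host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw",
  "log_level": "debug",
  "table_sharding": false
},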
omit_detailed_recording - Setting this to true will avoid writing the raw_request and raw_response fields for each request in pumps. Defaults to false.
max_record_size - Defines the maximum size (in bytes) for the raw_request and raw_response logs. Defaults to 0. If it is not set, tyk-pump will not trim any data and will store the full information.
This can also be set at a pump level. For example:
"csv": {
"type": "csv",
"max_record_size":1000,
"meta": {
"csv_dir": "./"
}
}
From v2.9.4, we have introduced a /health endpoint to confirm the Pump is running. You need to configure the following settings:
health_check_endpoint_name - The default is "hello"
health_check_endpoint_port - The default port is 8083
This returns a HTTP 200 OK response if the Pump is running.
The Tyk Dashboard uses the "mongo-pump-aggregate" collection to display analytics. This is different than the standard "mongo" pump plugin that will store individual analytic items into mongo. The aggregate functionality was built to be fast, as querying raw analytics is expensive in large data sets.
"index_name"
- The name of the index that all the analytics data will be placed in. Defaults to "tyk_analytics"
"elasticsearch_url"
- If sniffing is disabled, the URL that all data will be sent to. Defaults to "http://localhost:9200"
"enable_sniffing"
- If sniffing is enabled, the "elasticsearch_url" will be used to make a request to get a list of all the nodes in the cluster, the returned addresses will then be used. Defaults to false
"document_type"
- The type of the document that is created in ES. Defaults to "tyk_analytics"
"rolling_index"
- Appends the date to the end of the index name, so each days data is split into a different index name. E.g. tyk_analytics-2016.02.28 Defaults to false
"extended_stats"
- If set to true will include the following additional fields: Raw Request, Raw Response and User Agent.
"version"
- Specifies the ES version. Use "3" for ES 3.X, "5" for ES 5.X, "6" for ES 6.X, "7" for ES 7.X . Defaults to "3".
"disable_bulk"
- Disable batch writing. Defaults to false.
bulk_config
: Batch writing trigger configuration. Each option is an OR with eachother:
workers
: Number of workers. Defaults to 1.flush_interval
: Specifies the time in seconds to flush the data and send it to ES. Default disabled.bulk_actions
: Specifies the number of requests needed to flush the data and send it to ES. Defaults to 1000 requests. If it is needed, can be disabled with -1.bulk_size
: Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB. If it is needed, can be disabled with -1.
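As a sketch, a bulk_config that flushes on request count or payload size instead of a timed interval (the numbers are illustrative) could look like this:
"elasticsearch": {
  "type": "elasticsearch",
  "meta": {
    "index_name": "tyk_analytics",
    "elasticsearch_url": "http://localhost:9200",
    "version": "7",
    "disable_bulk": false,
    "bulk_config": {
      "workers": 2,
      "bulk_actions": 500,
      "bulk_size": 1048576
    }
  }
}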
Moesif is a user-centric API analytics and monitoring service for APIs. More Info on Moesif for Tyk
"application_id"
- Moesif Application Id. You can find your Moesif Application Id from Moesif Dashboard -> Top Right Menu -> API Keys . Moesif recommends creating separate Application Ids for each environment such as Production, Staging, and Development to keep data isolated."request_header_masks"
- (optional) An option to mask a specific request header field. Type: String Array[] string
"request_body_masks"
- (optional) An option to mask a specific - request body field. Type: String Array[] string
"response_header_masks"
- (optional) An option to mask a specific response header field. Type: String Array[] string
"response_body_masks"
- (optional) An option to mask a specific response body field. Type: String Array[] string
"disable_capture_request_body"
- (optional) An option to disable logging of request body. Type: Boolean. Default value isfalse
."disable_capture_response_body"
- (optional) An option to disable logging of response body. Type: Boolean. Default value isfalse
."user_id_header"
- (optional) An optional field name to identify User from a request or response header. Type: String."company_id_header"
- (optional) An optional field name to identify Company (Account) from a request or response header. Type: String."authorization_header_name"
- (optional) An optional request header field name to used to identify the User in Moesif. Type: String. Default value isauthorization
."authorization_user_id_field"
- (optional) An optional field name use to parse the User from authorization header in Moesif. Type: String. Default value issub
."enable_bulk"
- Set this totrue
to enablebulk_config
."bulk_config"
- (optional) Batch writing trigger configuration."event_queue_size"
- (optional) An optional field name which specify the maximum number of events to hold in queue before sending to Moesif. In case of network issues when not able to connect/send event to Moesif, skips adding new events to the queue to prevent memory overflow. Type: int. Default value is10000
."batch_size"
- (optional) An optional field name which specify the maximum batch size when sending to Moesif. Type: int. Default value is200
."timer_wake_up_seconds"
- (optional) An optional field which specifies a time (every n seconds) how often background thread runs to send events to moesif. Type: int. Default value is2
seconds.
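A sketch combining a few of these optional fields (the application id and header names are placeholders, not required values) might look like this:
"moesif": {
  "type": "moesif",
  "meta": {
    "application_id": "<YOUR-MOESIF-APPLICATION-ID>",
    "request_header_masks": ["Authorization"],
    "response_body_masks": ["password"],
    "user_id_header": "X-User-Id",
    "company_id_header": "X-Company-Id",
    "disable_capture_request_body": false
  }
}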
Hybrid Pump allows you to install Tyk Pump inside Multi-Cloud or MDCB Worker installations. You can configure Tyk Pump to send data to the back-end of your choice (e.g. ElasticSearch) and, in parallel, forward analytics to Tyk Cloud. Additionally, you can set the aggregated flag to send only aggregated analytics to MDCB or Tyk Cloud, in order to save network bandwidth between DCs.
NOTE: Make sure your tyk.conf has analytics_config.type set to an empty string value.
rpc_key - Put your organization ID in this field.
api_key - This is the API key of a user used to authenticate and authorise the Gateway's access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce risk if compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny.
aggregated - Set this field to true to send only aggregated analytics to MDCB or Tyk Cloud.
connection_string - The MDCB instance or load balancer.
use_ssl - Set this field to true if you need secured connection (default value is false).
ssl_insecure_skip_verify - Set this field to true if you use self signed certificate.
group_id - This is the “zone” that this instance inhabits, e.g. the DC it lives in. It must be unique to each slave cluster / DC.
call_timeout - This is the timeout (in milliseconds) for RPC calls.
rpc_pool_size - This is the maximum number of connections to MDCB.
Prometheus is an open-source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.
Add the following section to expose the "/metrics" endpoint:
"prometheus": {
"type": "prometheus",
"meta": {
"listen_address": "localhost:9090",
"path": "/metrics"
}
},
Note - When run as a Docker image, use "listen_address": ":9090".
Tyk exposes the following counters:
- tyk_http_status{code, api}
- tyk_http_status_per_path{code, api, path, method}
- tyk_http_status_per_key{code, key}
- tyk_http_status_per_oauth_client{code, client_id}
And the following Histogram for latencies:
- tyk_latency{type, api}
address: address of the datadog agent including host & port
namespace: prefix for your metrics to datadog
async_uds: Enable async UDS over UDP https://github.com/Datadog/datadog-go#unix-domain-sockets-client
async_uds_write_timeout_seconds: Integer write timeout in seconds if async_uds: true
buffered: Enable buffering of messages
buffered_max_messages: Max messages in a single datagram if buffered: true. Default 16
sample_rate: default 1, which equates to 100% of requests. To sample at 50%, set to 0.5
tags: List of tags to be added to the metric. The possible options are listed in the example below
If no tag is specified the fallback behavior is to use the below tags:
path
method
response_code
api_version
api_name
api_id
org_id
tracked
oauth_id
Note that this configuration can generate significant charges due to the unbound nature of the path tag.
"dogstatsd": {
"type": "dogstatsd",
"meta": {
"address": "localhost:8125",
"namespace": "pump",
"async_uds": true,
"async_uds_write_timeout_seconds": 2,
"buffered": true,
"buffered_max_messages": 32,
"sample_rate": 0.5,
"tags": [
"method",
"response_code",
"api_version",
"api_name",
"api_id",
"org_id",
"tracked",
"path",
"oauth_id"
]
}
},
On startup, you should see the loaded configs when initializing the dogstatsd pump:
[May 10 15:23:44] INFO dogstatsd: initializing pump
[May 10 15:23:44] INFO dogstatsd: namespace: pump.
[May 10 15:23:44] INFO dogstatsd: sample_rate: 50%
[May 10 15:23:44] INFO dogstatsd: buffered: true, max_messages: 32
[May 10 15:23:44] INFO dogstatsd: async_uds: true, write_timeout: 2s
Setting up Splunk with an HTTP Event Collector
collector_token: The token for your Splunk HTTP Event Collector.
collector_url: Endpoint the Pump will send analytics to. It should look something like:
https://splunk:8088/services/collector/event
ssl_insecure_skip_verify: Controls whether the pump client verifies the Splunk server's certificate chain and host name.
obfuscate_api_keys: (optional) Controls whether the pump client should hide the API key. In case you still need a substring of the value, check the next option. Type: Boolean. Default value is false.
obfuscate_api_keys_length: (optional) Defines the number of characters from the end of the API key to keep. obfuscate_api_keys should be set to true. Type: Integer. Default value is 0.
fields: (optional) Defines which analytics fields should participate in the Splunk event. Check the available fields in the example below. Type: String Array [] string. Default value is ["method", "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id", "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]
ignore_tag_prefix_list: (optional) Chooses which tags should be ignored by the Splunk Pump. Keep in mind that the tag name and value are hyphenated. Type: String Array [] string. Default value is []
enable_batch: If this is set to true, the pump will send the analytics records to Splunk in batches. Type: Boolean. Default value is false.
max_content_length: Max content length in bytes to be sent in batch requests. It should match the max_content_length configured in Splunk. If the purged analytics records don't reach that number of bytes, they are sent anyway in each purge_loop. Type: Integer. Default value is 838860800 (~800 MB), the same default value as the Splunk config.
Example:
"splunk": {
"type": "splunk",
"meta": {
"collector_token": "<token>",
"collector_url": "<url>",
"ssl_insecure_skip_verify": false,
"ssl_cert_file": "<cert-path>",
"ssl_key_file": "<key-path>",
"ssl_server_name": "<server-name>",
"obfuscate_api_keys": true,
"obfuscate_api_keys_length": 10,
"enable_batch":true,
"fields": [
"method",
"host",
"path",
"raw_path",
"content_length",
"user_agent",
"response_code",
"api_key",
"time_stamp",
"api_version",
"api_name",
"api_id",
"org_id",
"oauth_id",
"raw_request",
"request_time",
"raw_response",
"ip_address",
"geo",
"network",
"latency",
"tags",
"alias",
"track_path"
],
"ignore_tag_prefix_list": [
"key-",
"org-",
"api-",
"original-path-",
]
}
},
Logz.io is a cloud observability platform providing Log Management built on ELK, Infrastructure Monitoring based on Grafana, and an ELK-based Cloud SIEM.
The following configuration values are available:
The simplest example configuration just needs the token for sending data to your Logz.io account.
"logzio": {
"type": "logzio",
"meta": {
"token": "<YOUR-LOGZ.IO-TOKEN>"
}
}
More advanced fields:
meta.url - Use this if you do not want to use the default Logz.io URL, e.g. when using a proxy. The default is https://listener.logz.io:8071.
meta.queue_dir - The directory for the queue.
meta.drain_duration - Set the drain duration (flush logs on disk). The default value is 3s.
meta.disk_threshold - Set the disk queue threshold; once the threshold is crossed, the sender will not enqueue the received logs. The default value is 98 (percentage of disk).
meta.check_disk_space - Set the sender to check if it crosses the maximum allowed disk usage. The default value is true.
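A sketch using some of these advanced fields (the token and proxy URL are placeholders) could look like this:
"logzio": {
  "type": "logzio",
  "meta": {
    "token": "<YOUR-LOGZ.IO-TOKEN>",
    "url": "https://my-proxy.example.com:8071",
    "drain_duration": "3s",
    "disk_threshold": 98,
    "check_disk_space": true
  }
}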
broker: The list of brokers used to discover the partitions available on the Kafka cluster. E.g. "localhost:9092"
use_ssl: Enables SSL connection.
ssl_insecure_skip_verify: Controls whether the pump client verifies the Kafka server's certificate chain and host name.
client_id: Unique identifier for client connections established with Kafka.
topic: The topic that the writer will produce messages to.
timeout: The maximum amount of time the writer will wait for a connect or write to complete.
compressed: Enables the "github.com/golang/snappy" codec to compress Kafka messages. By default this is false.
meta_data: Can be used to set custom metadata inside the Kafka message.
ssl_cert_file: Can be used to set a custom certificate file for authentication with Kafka.
ssl_key_file: Can be used to set a custom key file for authentication with Kafka.
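A sketch of a Kafka pump that authenticates with a client certificate (broker address and file paths are placeholders) might be:
"kafka": {
  "type": "kafka",
  "meta": {
    "broker": ["localhost:9092"],
    "topic": "tyk-pump",
    "client_id": "tyk-pump",
    "use_ssl": true,
    "ssl_insecure_skip_verify": false,
    "ssl_cert_file": "<cert-path>",
    "ssl_key_file": "<key-path>",
    "timeout": 60,
    "compressed": true
  }
}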
Supported in Tyk Pump v1.0.0+
"transport"
- Possible values are udp, tcp, tls
in string form
"network_addr"
- Host & Port combination of your syslog daemon ie: "localhost:5140"
"log_level"
- The severity level, an integer from 0-7, based off the Standard: Syslog Severity Levels
"tag"
- Prefix tag
When working with FluentD, you should provide a FluentD Parser based on the OS you are using so that FluentD can correctly read the logs
"syslog": {
"name": "syslog",
"meta": {
"transport": "udp",
"network_addr": "localhost:5140",
"log_level": 6,
"tag": "syslog-pump"
}
}
log_field_name - Root name of the JSON object the analytics record is nested in.
format - Format of the analytics logs. Defaults to text if json is not explicitly specified. When JSON logging is used, all pump logs to stdout will be JSON.
"stdout": {
"type": "stdout",
"meta": {
"log_field_name": "tyk-analytics-record",
"format": "json"
}
}
Supported in Tyk Pump v1.5.0+
type - The supported and tested types are sqlite and postgres.
connection_string - Specifies the connection string to the database. For example, for sqlite it would usually be enough to specify the path/name of the database, and for postgres, the host, port, user, password and dbname.
log_level - Specifies the SQL log verbosity. The possible values are: info, error and warning. By default, the value is silent, which means that it won't log any SQL query.
table_sharding - Specifies whether all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, false.
If table_sharding is false, all the records are going to be stored in the tyk_analytics table. If it's true, the records for each day are going to be stored in a tyk_analytics_YYYYMMDD table, where YYYYMMDD changes depending on the date.
batch_size - Specifies the number of records to be written in each batch. Type: int. By default, it writes a maximum of 1000 records per batch.
For example:
"sql": {
"name": "sql",
"meta": {
"type": "postgres",
"connection_string": "host=localhost port=5432 user=admin dbname=postgres_test password=test",
"table_sharding": false
}
}
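If you enable sharding, a sketch of the same pump writing to daily tables (connection values are placeholders) would be:
"sql": {
  "name": "sql",
  "meta": {
    "type": "postgres",
    "connection_string": "host=localhost port=5432 user=admin dbname=postgres_test password=test",
    "log_level": "info",
    "table_sharding": true
  }
}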
Supported in Tyk Pump v1.5.0+
type - The supported and tested types are sqlite and postgres.
connection_string - Specifies the connection string to the database. For example, for sqlite it would usually be enough to specify the path/name of the database, and for postgres, the host, port, user, password and dbname.
log_level - Specifies the SQL log verbosity. The possible values are: info, error and warning. By default, the value is silent, which means that it won't log any SQL query.
track_all_paths - Specifies whether aggregated data should be stored for all the endpoints. By default, false, which means that aggregated data is only stored for tracked endpoints.
ignore_tag_prefix_list - Specifies prefixes of tags that should be ignored.
table_sharding - Specifies whether all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, false.
If table_sharding is false, all the records are going to be stored in the tyk_aggregated table. If it's true, the records for each day are going to be stored in a tyk_aggregated_YYYYMMDD table, where YYYYMMDD changes depending on the date.
batch_size - Specifies the number of records to be written in each batch. Type: int. By default, it writes a maximum of 1000 records per batch.
For example:
"sql_aggregate": {
"name": "sql_aggregate",
"meta": {
"type": "postgres",
"connection_string": "host=localhost port=5432 user=admin dbname=postgres_test password=test",
"table_sharding": false
}
}
- Download dependent packages:
go get -t -d -v ./...
- Compile:
go build -v ./...
- Test:
go test -v ./...
From Tyk Pump v0.6.0 you can create multiple pumps of the same type by giving each pump a custom top-level key and specifying the pump type in its type field. For example:
"csv": {
"type": "csv",
"meta": {
"csv_dir": "./"
}
},
"csv_alt": {
"type": "csv",
"meta": {
"csv_dir": "./"
}
}