diff --git a/devel/concept/components.html b/devel/concept/components.html index 9cfb2fbfe..d18ed4e69 100644 --- a/devel/concept/components.html +++ b/devel/concept/components.html @@ -698,15 +698,6 @@ - - -
Component diagram of a typical setup:
- -All components are loosely coupled, thus for non-pgwatch components -(pgwatch components are only the metrics collector) +(pgwatch components are only the metrics collector) you can decide to make use of an already existing installation of Postgres, Grafana or Prometheus and run additionally just the pgwatch collector.
diff --git a/devel/gallery/isoflow-architecture-diagram.json b/devel/gallery/isoflow-architecture-diagram.json new file mode 100644 index 000000000..1f52f4f65 --- /dev/null +++ b/devel/gallery/isoflow-architecture-diagram.json @@ -0,0 +1 @@ +{"version":"2.3.0","title":"Untitled","colors":[{"id":"color1","value":"#a5b8f3"},{"id":"color2","value":"#bbadfb"},{"id":"color3","value":"#f4eb8e"},{"id":"color4","value":"#f0aca9"},{"id":"color5","value":"#fad6ac"},{"id":"color6","value":"#a8dc9d"},{"id":"color7","value":"#b3e5e3"}],"icons":[{"id":"block","name":"block","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/block.svg","isIsometric":true,"collection":"isoflow"},{"id":"cache","name":"cache","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/cache.svg","isIsometric":true,"collection":"isoflow"},{"id":"cardterminal","name":"cardterminal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/cardterminal.svg","isIsometric":true,"collection":"isoflow"},{"id":"cloud","name":"cloud","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/cloud.svg","isIsometric":true,"collection":"isoflow"},{"id":"cronjob","name":"cronjob","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/cronjob.svg","isIsometric":true,"collection":"isoflow"},{"id":"cube","name":"cube","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/cube.svg","isIsometric":true,"collection":"isoflow"},{"id":"desktop","name":"desktop","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/desktop.svg","isIsometric":true,"collection":"isoflow"},{"id":"diamond","name":"diamond","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/diamond.svg","isIsometric":true,"collection":"isoflow"},{"id":"dns","name":"dns","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/dns.svg","isIsometric":true,"collection":"isoflow"},{"id":"document","n
ame":"document","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/document.svg","isIsometric":true,"collection":"isoflow"},{"id":"firewall","name":"firewall","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/firewall.svg","isIsometric":true,"collection":"isoflow"},{"id":"function-module","name":"function-module","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/function-module.svg","isIsometric":true,"collection":"isoflow"},{"id":"image","name":"image","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/image.svg","isIsometric":true,"collection":"isoflow"},{"id":"laptop","name":"laptop","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/laptop.svg","isIsometric":true,"collection":"isoflow"},{"id":"loadbalancer","name":"loadbalancer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/loadbalancer.svg","isIsometric":true,"collection":"isoflow"},{"id":"lock","name":"lock","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/lock.svg","isIsometric":true,"collection":"isoflow"},{"id":"mail","name":"mail","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/mail.svg","isIsometric":true,"collection":"isoflow"},{"id":"mailmultiple","name":"mailmultiple","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/mailmultiple.svg","isIsometric":true,"collection":"isoflow"},{"id":"mobiledevice","name":"mobiledevice","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/mobiledevice.svg","isIsometric":true,"collection":"isoflow"},{"id":"office","name":"office","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/office.svg","isIsometric":true,"collection":"isoflow"},{"id":"package-module","name":"package-module","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/package-module.svg","isIsometric":true,"collection":"isoflow
"},{"id":"paymentcard","name":"paymentcard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/paymentcard.svg","isIsometric":true,"collection":"isoflow"},{"id":"plane","name":"plane","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/plane.svg","isIsometric":true,"collection":"isoflow"},{"id":"printer","name":"printer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/printer.svg","isIsometric":true,"collection":"isoflow"},{"id":"pyramid","name":"pyramid","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/pyramid.svg","isIsometric":true,"collection":"isoflow"},{"id":"queue","name":"queue","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/queue.svg","isIsometric":true,"collection":"isoflow"},{"id":"router","name":"router","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/router.svg","isIsometric":true,"collection":"isoflow"},{"id":"server","name":"server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/server.svg","isIsometric":true,"collection":"isoflow"},{"id":"speech","name":"speech","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/speech.svg","isIsometric":true,"collection":"isoflow"},{"id":"sphere","name":"sphere","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/sphere.svg","isIsometric":true,"collection":"isoflow"},{"id":"storage","name":"storage","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/storage.svg","isIsometric":true,"collection":"isoflow"},{"id":"switch-module","name":"switch-module","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/switch-module.svg","isIsometric":true,"collection":"isoflow"},{"id":"tower","name":"tower","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/tower.svg","isIsometric":true,"collection":"isoflow"},{"id":"truck-2","name":"truck-2","url":"h
ttps://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/truck-2.svg","isIsometric":true,"collection":"isoflow"},{"id":"truck","name":"truck","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/truck.svg","isIsometric":true,"collection":"isoflow"},{"id":"user","name":"user","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/user.svg","isIsometric":true,"collection":"isoflow"},{"id":"vm","name":"vm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/isoflow/vm.svg","isIsometric":true,"collection":"isoflow"},{"id":"_aws_","name":"_aws_","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/_aws_.svg","isIsometric":false,"collection":"aws"},{"id":"aws-activate","name":"aws-activate","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-activate.svg","isIsometric":false,"collection":"aws"},{"id":"aws-alexa-for-business","name":"aws-alexa-for-business","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-alexa-for-business.svg","isIsometric":false,"collection":"aws"},{"id":"aws-amplify","name":"aws-amplify","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-amplify.svg","isIsometric":false,"collection":"aws"},{"id":"aws-analytics","name":"aws-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-analytics.svg","isIsometric":false,"collection":"aws"},{"id":"aws-apache-mxnet-on-aws","name":"aws-apache-mxnet-on-aws","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-apache-mxnet-on-aws.svg","isIsometric":false,"collection":"aws"},{"id":"aws-api-gateway","name":"aws-api-gateway","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-api-gateway.svg","isIsometric":false,"collection":"aws"},{"id":"aws-app-mesh","name":"aws-app-mesh","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-app-mesh.svg","isIsometric":false,"collection":"aws"},
{"id":"aws-app-runner","name":"aws-app-runner","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-app-runner.svg","isIsometric":false,"collection":"aws"},{"id":"aws-appconfig","name":"aws-appconfig","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-appconfig.svg","isIsometric":false,"collection":"aws"},{"id":"aws-appflow","name":"aws-appflow","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-appflow.svg","isIsometric":false,"collection":"aws"},{"id":"aws-application-auto-scaling","name":"aws-application-auto-scaling","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-application-auto-scaling.svg","isIsometric":false,"collection":"aws"},{"id":"aws-application-cost-profiler","name":"aws-application-cost-profiler","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-application-cost-profiler.svg","isIsometric":false,"collection":"aws"},{"id":"aws-application-discovery-service","name":"aws-application-discovery-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-application-discovery-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-application-integration","name":"aws-application-integration","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-application-integration.svg","isIsometric":false,"collection":"aws"},{"id":"aws-application-migration-service","name":"aws-application-migration-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-application-migration-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-appstream","name":"aws-appstream","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-appstream.svg","isIsometric":false,"collection":"aws"},{"id":"aws-appsync","name":"aws-appsync","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-appsync.svg","isIsometric":false,"collection":"aws"},{"id":"aws-art
ifact","name":"aws-artifact","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-artifact.svg","isIsometric":false,"collection":"aws"},{"id":"aws-athena","name":"aws-athena","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-athena.svg","isIsometric":false,"collection":"aws"},{"id":"aws-audit-manager","name":"aws-audit-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-audit-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-augmented-ai-a2i","name":"aws-augmented-ai-a2i","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-augmented-ai-a2i.svg","isIsometric":false,"collection":"aws"},{"id":"aws-aurora","name":"aws-aurora","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-aurora.svg","isIsometric":false,"collection":"aws"},{"id":"aws-auto-scaling","name":"aws-auto-scaling","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-auto-scaling.svg","isIsometric":false,"collection":"aws"},{"id":"aws-backint-agent","name":"aws-backint-agent","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-backint-agent.svg","isIsometric":false,"collection":"aws"},{"id":"aws-backup","name":"aws-backup","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-backup.svg","isIsometric":false,"collection":"aws"},{"id":"aws-batch","name":"aws-batch","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-batch.svg","isIsometric":false,"collection":"aws"},{"id":"aws-billing-conductor","name":"aws-billing-conductor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-billing-conductor.svg","isIsometric":false,"collection":"aws"},{"id":"aws-blockchain","name":"aws-blockchain","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-blockchain.svg","isIsometric":false,"collection":"aws"},{"id":"aws-bottlerocket","name":"aws-bottlerocket","url":"htt
ps://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-bottlerocket.svg","isIsometric":false,"collection":"aws"},{"id":"aws-braket","name":"aws-braket","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-braket.svg","isIsometric":false,"collection":"aws"},{"id":"aws-budgets","name":"aws-budgets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-budgets.svg","isIsometric":false,"collection":"aws"},{"id":"aws-business-applications","name":"aws-business-applications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-business-applications.svg","isIsometric":false,"collection":"aws"},{"id":"aws-certificate-manager","name":"aws-certificate-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-certificate-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-chatbot","name":"aws-chatbot","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-chatbot.svg","isIsometric":false,"collection":"aws"},{"id":"aws-chime-sdk","name":"aws-chime-sdk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-chime-sdk.svg","isIsometric":false,"collection":"aws"},{"id":"aws-chime-voice-connector","name":"aws-chime-voice-connector","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-chime-voice-connector.svg","isIsometric":false,"collection":"aws"},{"id":"aws-chime","name":"aws-chime","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-chime.svg","isIsometric":false,"collection":"aws"},{"id":"aws-client-vpn","name":"aws-client-vpn","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-client-vpn.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-control-api","name":"aws-cloud-control-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-control-api.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-development-kit","name":"
aws-cloud-development-kit","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-development-kit.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-directory","name":"aws-cloud-directory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-directory.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-financial-management","name":"aws-cloud-financial-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-financial-management.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-map","name":"aws-cloud-map","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-map.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud-wan","name":"aws-cloud-wan","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud-wan.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloud9","name":"aws-cloud9","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloud9.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudendure-disaster-recovery","name":"aws-cloudendure-disaster-recovery","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudendure-disaster-recovery.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudformation","name":"aws-cloudformation","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudformation.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudfront","name":"aws-cloudfront","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudfront.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudhsm","name":"aws-cloudhsm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudhsm.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudsearch","name":"aws-cloudsearch","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudsea
rch.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudshell","name":"aws-cloudshell","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudshell.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudtrail","name":"aws-cloudtrail","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudtrail.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cloudwatch","name":"aws-cloudwatch","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cloudwatch.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codeartifact","name":"aws-codeartifact","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codeartifact.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codebuild","name":"aws-codebuild","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codebuild.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codecommit","name":"aws-codecommit","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codecommit.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codedeploy","name":"aws-codedeploy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codedeploy.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codeguru","name":"aws-codeguru","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codeguru.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codepipeline","name":"aws-codepipeline","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codepipeline.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codestar","name":"aws-codestar","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codestar.svg","isIsometric":false,"collection":"aws"},{"id":"aws-codewhisperer","name":"aws-codewhisperer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-codewhisperer.svg","isIsometric":false,"collection":"aws"
},{"id":"aws-cognito","name":"aws-cognito","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cognito.svg","isIsometric":false,"collection":"aws"},{"id":"aws-command-line-interface","name":"aws-command-line-interface","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-command-line-interface.svg","isIsometric":false,"collection":"aws"},{"id":"aws-comprehend-medical","name":"aws-comprehend-medical","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-comprehend-medical.svg","isIsometric":false,"collection":"aws"},{"id":"aws-comprehend","name":"aws-comprehend","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-comprehend.svg","isIsometric":false,"collection":"aws"},{"id":"aws-compute-optimizer","name":"aws-compute-optimizer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-compute-optimizer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-compute","name":"aws-compute","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-compute.svg","isIsometric":false,"collection":"aws"},{"id":"aws-config","name":"aws-config","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-config.svg","isIsometric":false,"collection":"aws"},{"id":"aws-connect","name":"aws-connect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-connect.svg","isIsometric":false,"collection":"aws"},{"id":"aws-console-mobile-application","name":"aws-console-mobile-application","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-console-mobile-application.svg","isIsometric":false,"collection":"aws"},{"id":"aws-containers","name":"aws-containers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-containers.svg","isIsometric":false,"collection":"aws"},{"id":"aws-control-tower","name":"aws-control-tower","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-control-tow
er.svg","isIsometric":false,"collection":"aws"},{"id":"aws-corretto","name":"aws-corretto","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-corretto.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cost-and-usage-report","name":"aws-cost-and-usage-report","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cost-and-usage-report.svg","isIsometric":false,"collection":"aws"},{"id":"aws-cost-explorer","name":"aws-cost-explorer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-cost-explorer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-customer-enablement","name":"aws-customer-enablement","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-customer-enablement.svg","isIsometric":false,"collection":"aws"},{"id":"aws-data-exchange","name":"aws-data-exchange","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-data-exchange.svg","isIsometric":false,"collection":"aws"},{"id":"aws-data-pipeline","name":"aws-data-pipeline","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-data-pipeline.svg","isIsometric":false,"collection":"aws"},{"id":"aws-database-migration-service","name":"aws-database-migration-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-database-migration-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-database","name":"aws-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-database.svg","isIsometric":false,"collection":"aws"},{"id":"aws-datasync","name":"aws-datasync","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-datasync.svg","isIsometric":false,"collection":"aws"},{"id":"aws-deep-learning-amis","name":"aws-deep-learning-amis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-deep-learning-amis.svg","isIsometric":false,"collection":"aws"},{"id":"aws-deep-learning-containers","nam
e":"aws-deep-learning-containers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-deep-learning-containers.svg","isIsometric":false,"collection":"aws"},{"id":"aws-deepcomposer","name":"aws-deepcomposer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-deepcomposer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-deeplens","name":"aws-deeplens","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-deeplens.svg","isIsometric":false,"collection":"aws"},{"id":"aws-deepracer","name":"aws-deepracer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-deepracer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-detective","name":"aws-detective","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-detective.svg","isIsometric":false,"collection":"aws"},{"id":"aws-developer-tools","name":"aws-developer-tools","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-developer-tools.svg","isIsometric":false,"collection":"aws"},{"id":"aws-device-farm","name":"aws-device-farm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-device-farm.svg","isIsometric":false,"collection":"aws"},{"id":"aws-devops-guru","name":"aws-devops-guru","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-devops-guru.svg","isIsometric":false,"collection":"aws"},{"id":"aws-direct-connect","name":"aws-direct-connect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-direct-connect.svg","isIsometric":false,"collection":"aws"},{"id":"aws-directory-service","name":"aws-directory-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-directory-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-distro-for-opentelemetry","name":"aws-distro-for-opentelemetry","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-distro-for-opentelemetry.svg","i
sIsometric":false,"collection":"aws"},{"id":"aws-documentdb","name":"aws-documentdb","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-documentdb.svg","isIsometric":false,"collection":"aws"},{"id":"aws-dynamodb","name":"aws-dynamodb","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-dynamodb.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ec2-auto-scaling","name":"aws-ec2-auto-scaling","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ec2-auto-scaling.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ec2-image-builder","name":"aws-ec2-image-builder","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ec2-image-builder.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ec2-m5n","name":"aws-ec2-m5n","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ec2-m5n.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ec2-r5n","name":"aws-ec2-r5n","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ec2-r5n.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ec2","name":"aws-ec2","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ec2.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ecs-anywhere","name":"aws-ecs-anywhere","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ecs-anywhere.svg","isIsometric":false,"collection":"aws"},{"id":"aws-efs","name":"aws-efs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-efs.svg","isIsometric":false,"collection":"aws"},{"id":"aws-eks-anywhere","name":"aws-eks-anywhere","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-eks-anywhere.svg","isIsometric":false,"collection":"aws"},{"id":"aws-eks-cloud","name":"aws-eks-cloud","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-eks-cloud.svg","isIsometric":false,"collection":"aws"},{"id":"aws-eks-distro","name":"aw
s-eks-distro","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-eks-distro.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-beanstalk","name":"aws-elastic-beanstalk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-beanstalk.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-block-store","name":"aws-elastic-block-store","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-block-store.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-container-kubernetes","name":"aws-elastic-container-kubernetes","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-container-kubernetes.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-container-registry","name":"aws-elastic-container-registry","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-container-registry.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-container-service","name":"aws-elastic-container-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-container-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-fabric-adapter","name":"aws-elastic-fabric-adapter","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-fabric-adapter.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-inference","name":"aws-elastic-inference","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-inference.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-kubernetes-service","name":"aws-elastic-kubernetes-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-kubernetes-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-load-balancing","name":"aws-elastic-load-balancing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws
/aws-elastic-load-balancing.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elastic-transcoder","name":"aws-elastic-transcoder","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elastic-transcoder.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elasticache","name":"aws-elasticache","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elasticache.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-appliances-&-software","name":"aws-elemental-appliances-&-software","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-appliances-&-software.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-conductor","name":"aws-elemental-conductor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-conductor.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-delta","name":"aws-elemental-delta","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-delta.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-link","name":"aws-elemental-link","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-link.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-live","name":"aws-elemental-live","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-live.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-mediaconnect","name":"aws-elemental-mediaconnect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-mediaconnect.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-mediaconvert","name":"aws-elemental-mediaconvert","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-mediaconvert.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-medialive","name":"aws-elemental-medialive","url":"https://isoflow-public.s3.eu-west-
2.amazonaws.com/isopacks/aws/aws-elemental-medialive.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-mediapackage","name":"aws-elemental-mediapackage","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-mediapackage.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-mediastore","name":"aws-elemental-mediastore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-mediastore.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-mediatailor","name":"aws-elemental-mediatailor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-mediatailor.svg","isIsometric":false,"collection":"aws"},{"id":"aws-elemental-server","name":"aws-elemental-server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-elemental-server.svg","isIsometric":false,"collection":"aws"},{"id":"aws-emr","name":"aws-emr","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-emr.svg","isIsometric":false,"collection":"aws"},{"id":"aws-end-user-computing","name":"aws-end-user-computing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-end-user-computing.svg","isIsometric":false,"collection":"aws"},{"id":"aws-eventbridge","name":"aws-eventbridge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-eventbridge.svg","isIsometric":false,"collection":"aws"},{"id":"aws-express-workflows","name":"aws-express-workflows","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-express-workflows.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fargate","name":"aws-fargate","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fargate.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fault-injection-simulator","name":"aws-fault-injection-simulator","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fault-injection-simulat
or.svg","isIsometric":false,"collection":"aws"},{"id":"aws-finspace","name":"aws-finspace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-finspace.svg","isIsometric":false,"collection":"aws"},{"id":"aws-firewall-manager","name":"aws-firewall-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-firewall-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-forecast","name":"aws-forecast","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-forecast.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fraud-detector","name":"aws-fraud-detector","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fraud-detector.svg","isIsometric":false,"collection":"aws"},{"id":"aws-freertos","name":"aws-freertos","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-freertos.svg","isIsometric":false,"collection":"aws"},{"id":"aws-front-end-web-mobile","name":"aws-front-end-web-mobile","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-front-end-web-mobile.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fsx-for-lustre","name":"aws-fsx-for-lustre","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fsx-for-lustre.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fsx-for-netapp-ontap","name":"aws-fsx-for-netapp-ontap","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fsx-for-netapp-ontap.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fsx-for-openzfs","name":"aws-fsx-for-openzfs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fsx-for-openzfs.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fsx-for-wfs","name":"aws-fsx-for-wfs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-fsx-for-wfs.svg","isIsometric":false,"collection":"aws"},{"id":"aws-fsx","name":"aws-fsx","url":"https://isoflow-public.s3.eu-west-2.amaz
onaws.com/isopacks/aws/aws-fsx.svg","isIsometric":false,"collection":"aws"},{"id":"aws-game-tech","name":"aws-game-tech","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-game-tech.svg","isIsometric":false,"collection":"aws"},{"id":"aws-gamekit","name":"aws-gamekit","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-gamekit.svg","isIsometric":false,"collection":"aws"},{"id":"aws-gamelift","name":"aws-gamelift","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-gamelift.svg","isIsometric":false,"collection":"aws"},{"id":"aws-gamesparks","name":"aws-gamesparks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-gamesparks.svg","isIsometric":false,"collection":"aws"},{"id":"aws-genomics-cli","name":"aws-genomics-cli","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-genomics-cli.svg","isIsometric":false,"collection":"aws"},{"id":"aws-global-accelerator","name":"aws-global-accelerator","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-global-accelerator.svg","isIsometric":false,"collection":"aws"},{"id":"aws-glue-databrew","name":"aws-glue-databrew","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-glue-databrew.svg","isIsometric":false,"collection":"aws"},{"id":"aws-glue-elastic-views","name":"aws-glue-elastic-views","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-glue-elastic-views.svg","isIsometric":false,"collection":"aws"},{"id":"aws-glue","name":"aws-glue","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-glue.svg","isIsometric":false,"collection":"aws"},{"id":"aws-ground-station","name":"aws-ground-station","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-ground-station.svg","isIsometric":false,"collection":"aws"},{"id":"aws-guardduty","name":"aws-guardduty","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/
aws-guardduty.svg","isIsometric":false,"collection":"aws"},{"id":"aws-healthlake","name":"aws-healthlake","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-healthlake.svg","isIsometric":false,"collection":"aws"},{"id":"aws-honeycode","name":"aws-honeycode","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-honeycode.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iam-identity-center","name":"aws-iam-identity-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iam-identity-center.svg","isIsometric":false,"collection":"aws"},{"id":"aws-identity-and-access-management","name":"aws-identity-and-access-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-identity-and-access-management.svg","isIsometric":false,"collection":"aws"},{"id":"aws-inspector","name":"aws-inspector","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-inspector.svg","isIsometric":false,"collection":"aws"},{"id":"aws-interactive-video-service","name":"aws-interactive-video-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-interactive-video-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-internet-of-things","name":"aws-internet-of-things","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-internet-of-things.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-1-click","name":"aws-iot-1-click","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-1-click.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-analytics","name":"aws-iot-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-analytics.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-button","name":"aws-iot-button","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-button.svg","isIsometric":false,"collection":"aws"},{"id":"aw
s-iot-core","name":"aws-iot-core","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-core.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-device-defender","name":"aws-iot-device-defender","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-device-defender.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-device-management","name":"aws-iot-device-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-device-management.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-edukit","name":"aws-iot-edukit","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-edukit.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-events","name":"aws-iot-events","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-events.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-expresslink","name":"aws-iot-expresslink","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-expresslink.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-fleetwise","name":"aws-iot-fleetwise","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-fleetwise.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-greengrass","name":"aws-iot-greengrass","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-greengrass.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-roborunner","name":"aws-iot-roborunner","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-roborunner.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-sitewise","name":"aws-iot-sitewise","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-sitewise.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-things-graph","name":"aws-iot-things-graph","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aw
s-iot-things-graph.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iot-twinmaker","name":"aws-iot-twinmaker","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iot-twinmaker.svg","isIsometric":false,"collection":"aws"},{"id":"aws-iq","name":"aws-iq","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-iq.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kendra","name":"aws-kendra","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kendra.svg","isIsometric":false,"collection":"aws"},{"id":"aws-key-management-service","name":"aws-key-management-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-key-management-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-keyspaces","name":"aws-keyspaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-keyspaces.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kinesis-data-analytics","name":"aws-kinesis-data-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kinesis-data-analytics.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kinesis-data-streams","name":"aws-kinesis-data-streams","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kinesis-data-streams.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kinesis-firehose","name":"aws-kinesis-firehose","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kinesis-firehose.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kinesis-video-streams","name":"aws-kinesis-video-streams","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kinesis-video-streams.svg","isIsometric":false,"collection":"aws"},{"id":"aws-kinesis","name":"aws-kinesis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-kinesis.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lake-formation","name":"aws-lake-formatio
n","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lake-formation.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lambda","name":"aws-lambda","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lambda.svg","isIsometric":false,"collection":"aws"},{"id":"aws-launch-wizard","name":"aws-launch-wizard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-launch-wizard.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lex","name":"aws-lex","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lex.svg","isIsometric":false,"collection":"aws"},{"id":"aws-license-manager","name":"aws-license-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-license-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lightsail","name":"aws-lightsail","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lightsail.svg","isIsometric":false,"collection":"aws"},{"id":"aws-local-zones","name":"aws-local-zones","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-local-zones.svg","isIsometric":false,"collection":"aws"},{"id":"aws-location-service","name":"aws-location-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-location-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lookout-for-equipment","name":"aws-lookout-for-equipment","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lookout-for-equipment.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lookout-for-metrics","name":"aws-lookout-for-metrics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lookout-for-metrics.svg","isIsometric":false,"collection":"aws"},{"id":"aws-lookout-for-vision","name":"aws-lookout-for-vision","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-lookout-for-vision.svg","isIsometric":false,"collection":"aws"},
{"id":"aws-machine-learning","name":"aws-machine-learning","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-machine-learning.svg","isIsometric":false,"collection":"aws"},{"id":"aws-macie","name":"aws-macie","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-macie.svg","isIsometric":false,"collection":"aws"},{"id":"aws-mainframe-modernization","name":"aws-mainframe-modernization","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-mainframe-modernization.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-blockchain","name":"aws-managed-blockchain","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-blockchain.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-grafana","name":"aws-managed-grafana","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-grafana.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-service-for-prometheus","name":"aws-managed-service-for-prometheus","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-service-for-prometheus.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-services","name":"aws-managed-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-services.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-streaming-for-apache-kafka","name":"aws-managed-streaming-for-apache-kafka","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-streaming-for-apache-kafka.svg","isIsometric":false,"collection":"aws"},{"id":"aws-managed-workflows-for-apache-airflow","name":"aws-managed-workflows-for-apache-airflow","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-managed-workflows-for-apache-airflow.svg","isIsometric":false,"collection":"aws"},{"id":"aws-management-console","name":"aws-management-console","url":"https://isoflow-public.s3.eu
-west-2.amazonaws.com/isopacks/aws/aws-management-console.svg","isIsometric":false,"collection":"aws"},{"id":"aws-management-governance","name":"aws-management-governance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-management-governance.svg","isIsometric":false,"collection":"aws"},{"id":"aws-marketplace","name":"aws-marketplace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-marketplace.svg","isIsometric":false,"collection":"aws"},{"id":"aws-media-services","name":"aws-media-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-media-services.svg","isIsometric":false,"collection":"aws"},{"id":"aws-memorydb-for-redis","name":"aws-memorydb-for-redis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-memorydb-for-redis.svg","isIsometric":false,"collection":"aws"},{"id":"aws-migration-evaluator","name":"aws-migration-evaluator","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-migration-evaluator.svg","isIsometric":false,"collection":"aws"},{"id":"aws-migration-hub","name":"aws-migration-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-migration-hub.svg","isIsometric":false,"collection":"aws"},{"id":"aws-migration-transfer","name":"aws-migration-transfer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-migration-transfer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-monitron","name":"aws-monitron","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-monitron.svg","isIsometric":false,"collection":"aws"},{"id":"aws-mq","name":"aws-mq","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-mq.svg","isIsometric":false,"collection":"aws"},{"id":"aws-neptune","name":"aws-neptune","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-neptune.svg","isIsometric":false,"collection":"aws"},{"id":"aws-network-firewall","name
":"aws-network-firewall","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-network-firewall.svg","isIsometric":false,"collection":"aws"},{"id":"aws-networking-content-delivery","name":"aws-networking-content-delivery","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-networking-content-delivery.svg","isIsometric":false,"collection":"aws"},{"id":"aws-neuron","name":"aws-neuron","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-neuron.svg","isIsometric":false,"collection":"aws"},{"id":"aws-nice-dcv","name":"aws-nice-dcv","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-nice-dcv.svg","isIsometric":false,"collection":"aws"},{"id":"aws-nice-enginframe","name":"aws-nice-enginframe","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-nice-enginframe.svg","isIsometric":false,"collection":"aws"},{"id":"aws-nimble-studio","name":"aws-nimble-studio","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-nimble-studio.svg","isIsometric":false,"collection":"aws"},{"id":"aws-nitro-enclaves","name":"aws-nitro-enclaves","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-nitro-enclaves.svg","isIsometric":false,"collection":"aws"},{"id":"aws-open-3d-engine","name":"aws-open-3d-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-open-3d-engine.svg","isIsometric":false,"collection":"aws"},{"id":"aws-opensearch-service","name":"aws-opensearch-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-opensearch-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-opsworks","name":"aws-opsworks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-opsworks.svg","isIsometric":false,"collection":"aws"},{"id":"aws-organizations","name":"aws-organizations","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-organizations.svg","
isIsometric":false,"collection":"aws"},{"id":"aws-outposts-family","name":"aws-outposts-family","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-outposts-family.svg","isIsometric":false,"collection":"aws"},{"id":"aws-outposts-rack","name":"aws-outposts-rack","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-outposts-rack.svg","isIsometric":false,"collection":"aws"},{"id":"aws-outposts-servers","name":"aws-outposts-servers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-outposts-servers.svg","isIsometric":false,"collection":"aws"},{"id":"aws-panorama","name":"aws-panorama","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-panorama.svg","isIsometric":false,"collection":"aws"},{"id":"aws-parallelcluster","name":"aws-parallelcluster","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-parallelcluster.svg","isIsometric":false,"collection":"aws"},{"id":"aws-personal-health-dashboard","name":"aws-personal-health-dashboard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-personal-health-dashboard.svg","isIsometric":false,"collection":"aws"},{"id":"aws-personalize","name":"aws-personalize","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-personalize.svg","isIsometric":false,"collection":"aws"},{"id":"aws-pinpoint-apis","name":"aws-pinpoint-apis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-pinpoint-apis.svg","isIsometric":false,"collection":"aws"},{"id":"aws-pinpoint","name":"aws-pinpoint","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-pinpoint.svg","isIsometric":false,"collection":"aws"},{"id":"aws-polly","name":"aws-polly","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-polly.svg","isIsometric":false,"collection":"aws"},{"id":"aws-private-5g","name":"aws-private-5g","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/
isopacks/aws/aws-private-5g.svg","isIsometric":false,"collection":"aws"},{"id":"aws-privatelink","name":"aws-privatelink","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-privatelink.svg","isIsometric":false,"collection":"aws"},{"id":"aws-professional-services","name":"aws-professional-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-professional-services.svg","isIsometric":false,"collection":"aws"},{"id":"aws-proton","name":"aws-proton","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-proton.svg","isIsometric":false,"collection":"aws"},{"id":"aws-quantum-ledger-database","name":"aws-quantum-ledger-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-quantum-ledger-database.svg","isIsometric":false,"collection":"aws"},{"id":"aws-quantum-technologies","name":"aws-quantum-technologies","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-quantum-technologies.svg","isIsometric":false,"collection":"aws"},{"id":"aws-quicksight","name":"aws-quicksight","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-quicksight.svg","isIsometric":false,"collection":"aws"},{"id":"aws-rds-on-vmware","name":"aws-rds-on-vmware","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-rds-on-vmware.svg","isIsometric":false,"collection":"aws"},{"id":"aws-rds","name":"aws-rds","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-rds.svg","isIsometric":false,"collection":"aws"},{"id":"aws-red-hat-openshift","name":"aws-red-hat-openshift","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-red-hat-openshift.svg","isIsometric":false,"collection":"aws"},{"id":"aws-redshift","name":"aws-redshift","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-redshift.svg","isIsometric":false,"collection":"aws"},{"id":"aws-rekognition","name":"aws-rekognition","url":"https
://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-rekognition.svg","isIsometric":false,"collection":"aws"},{"id":"aws-repost","name":"aws-repost","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-repost.svg","isIsometric":false,"collection":"aws"},{"id":"aws-reserved-instance-reporting","name":"aws-reserved-instance-reporting","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-reserved-instance-reporting.svg","isIsometric":false,"collection":"aws"},{"id":"aws-resilience-hub","name":"aws-resilience-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-resilience-hub.svg","isIsometric":false,"collection":"aws"},{"id":"aws-resource-access-manager","name":"aws-resource-access-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-resource-access-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-robomaker","name":"aws-robomaker","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-robomaker.svg","isIsometric":false,"collection":"aws"},{"id":"aws-robotics","name":"aws-robotics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-robotics.svg","isIsometric":false,"collection":"aws"},{"id":"aws-route-53","name":"aws-route-53","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-route-53.svg","isIsometric":false,"collection":"aws"},{"id":"aws-s3-on-outposts","name":"aws-s3-on-outposts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-s3-on-outposts.svg","isIsometric":false,"collection":"aws"},{"id":"aws-sagemaker-ground-truth","name":"aws-sagemaker-ground-truth","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-sagemaker-ground-truth.svg","isIsometric":false,"collection":"aws"},{"id":"aws-sagemaker-studio-lab","name":"aws-sagemaker-studio-lab","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-sagemaker-studio-lab.svg
","isIsometric":false,"collection":"aws"},{"id":"aws-sagemaker","name":"aws-sagemaker","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-sagemaker.svg","isIsometric":false,"collection":"aws"},{"id":"aws-satellite","name":"aws-satellite","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-satellite.svg","isIsometric":false,"collection":"aws"},{"id":"aws-savings-plans","name":"aws-savings-plans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-savings-plans.svg","isIsometric":false,"collection":"aws"},{"id":"aws-secrets-manager","name":"aws-secrets-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-secrets-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-security-hub","name":"aws-security-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-security-hub.svg","isIsometric":false,"collection":"aws"},{"id":"aws-security-identity-compliance","name":"aws-security-identity-compliance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-security-identity-compliance.svg","isIsometric":false,"collection":"aws"},{"id":"aws-server-migration-service","name":"aws-server-migration-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-server-migration-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-serverless-application-repository","name":"aws-serverless-application-repository","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-serverless-application-repository.svg","isIsometric":false,"collection":"aws"},{"id":"aws-serverless","name":"aws-serverless","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-serverless.svg","isIsometric":false,"collection":"aws"},{"id":"aws-service-catalog","name":"aws-service-catalog","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-service-catalog.svg","isIsometric":false,"col
lection":"aws"},{"id":"aws-shield","name":"aws-shield","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-shield.svg","isIsometric":false,"collection":"aws"},{"id":"aws-signer","name":"aws-signer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-signer.svg","isIsometric":false,"collection":"aws"},{"id":"aws-simple-email-service","name":"aws-simple-email-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-simple-email-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-simple-notification-service","name":"aws-simple-notification-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-simple-notification-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-simple-queue-service","name":"aws-simple-queue-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-simple-queue-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-simple-storage-service-glacier","name":"aws-simple-storage-service-glacier","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-simple-storage-service-glacier.svg","isIsometric":false,"collection":"aws"},{"id":"aws-simple-storage-service","name":"aws-simple-storage-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-simple-storage-service.svg","isIsometric":false,"collection":"aws"},{"id":"aws-site-to-site-vpn","name":"aws-site-to-site-vpn","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-site-to-site-vpn.svg","isIsometric":false,"collection":"aws"},{"id":"aws-snowball-edge","name":"aws-snowball-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-snowball-edge.svg","isIsometric":false,"collection":"aws"},{"id":"aws-snowball","name":"aws-snowball","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-snowball.svg","isIsometric":false,"collection":"aws"},{"id":"aws
-snowcone","name":"aws-snowcone","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-snowcone.svg","isIsometric":false,"collection":"aws"},{"id":"aws-snowmobile","name":"aws-snowmobile","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-snowmobile.svg","isIsometric":false,"collection":"aws"},{"id":"aws-step-functions","name":"aws-step-functions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-step-functions.svg","isIsometric":false,"collection":"aws"},{"id":"aws-storage-gateway","name":"aws-storage-gateway","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-storage-gateway.svg","isIsometric":false,"collection":"aws"},{"id":"aws-storage","name":"aws-storage","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-storage.svg","isIsometric":false,"collection":"aws"},{"id":"aws-sumerian","name":"aws-sumerian","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-sumerian.svg","isIsometric":false,"collection":"aws"},{"id":"aws-support","name":"aws-support","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-support.svg","isIsometric":false,"collection":"aws"},{"id":"aws-systems-manager-incident-manager","name":"aws-systems-manager-incident-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-systems-manager-incident-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-systems-manager","name":"aws-systems-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-systems-manager.svg","isIsometric":false,"collection":"aws"},{"id":"aws-tensorflow-on-aws","name":"aws-tensorflow-on-aws","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-tensorflow-on-aws.svg","isIsometric":false,"collection":"aws"},{"id":"aws-textract","name":"aws-textract","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-textract.svg","isIsometr
ic":false,"collection":"aws"},{"id":"aws-thinkbox-deadline","name":"aws-thinkbox-deadline","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-deadline.svg","isIsometric":false,"collection":"aws"},{"id":"aws-thinkbox-frost","name":"aws-thinkbox-frost","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-frost.svg","isIsometric":false,"collection":"aws"},{"id":"aws-thinkbox-krakatoa","name":"aws-thinkbox-krakatoa","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-krakatoa.svg","isIsometric":false,"collection":"aws"},{"id":"aws-thinkbox-sequoia","name":"aws-thinkbox-sequoia","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-sequoia.svg","isIsometric":false,"collection":"aws"},{"id":"aws-thinkbox-stoke","name":"aws-thinkbox-stoke","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-stoke.svg","isIsometric":false,"collection":"aws"},{"id":"aws-thinkbox-xmesh","name":"aws-thinkbox-xmesh","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-thinkbox-xmesh.svg","isIsometric":false,"collection":"aws"},{"id":"aws-timestream","name":"aws-timestream","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-timestream.svg","isIsometric":false,"collection":"aws"},{"id":"aws-tools-and-sdks","name":"aws-tools-and-sdks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-tools-and-sdks.svg","isIsometric":false,"collection":"aws"},{"id":"aws-torchserve","name":"aws-torchserve","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-torchserve.svg","isIsometric":false,"collection":"aws"},{"id":"aws-training-certification","name":"aws-training-certification","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-training-certification.svg","isIsometric":false,"collection":"aws"},{"id":"aws-transcribe","name":"aws-transcribe","url":"ht
tps://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-transcribe.svg","isIsometric":false,"collection":"aws"},{"id":"aws-transfer-family","name":"aws-transfer-family","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-transfer-family.svg","isIsometric":false,"collection":"aws"},{"id":"aws-transit-gateway","name":"aws-transit-gateway","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-transit-gateway.svg","isIsometric":false,"collection":"aws"},{"id":"aws-translate","name":"aws-translate","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-translate.svg","isIsometric":false,"collection":"aws"},{"id":"aws-trusted-advisor","name":"aws-trusted-advisor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-trusted-advisor.svg","isIsometric":false,"collection":"aws"},{"id":"aws-vmware-cloud-on-aws","name":"aws-vmware-cloud-on-aws","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-vmware-cloud-on-aws.svg","isIsometric":false,"collection":"aws"},{"id":"aws-vr-ar","name":"aws-vr-ar","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-vr-ar.svg","isIsometric":false,"collection":"aws"},{"id":"aws-waf","name":"aws-waf","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-waf.svg","isIsometric":false,"collection":"aws"},{"id":"aws-wavelength","name":"aws-wavelength","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-wavelength.svg","isIsometric":false,"collection":"aws"},{"id":"aws-well-architected-tool","name":"aws-well-architected-tool","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-well-architected-tool.svg","isIsometric":false,"collection":"aws"},{"id":"aws-workdocs-sdk","name":"aws-workdocs-sdk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-workdocs-sdk.svg","isIsometric":false,"collection":"aws"},{"id":"aws-workdocs","name":"aws-work
docs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-workdocs.svg","isIsometric":false,"collection":"aws"},{"id":"aws-worklink","name":"aws-worklink","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-worklink.svg","isIsometric":false,"collection":"aws"},{"id":"aws-workmail","name":"aws-workmail","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-workmail.svg","isIsometric":false,"collection":"aws"},{"id":"aws-workspaces-web","name":"aws-workspaces-web","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-workspaces-web.svg","isIsometric":false,"collection":"aws"},{"id":"aws-workspaces","name":"aws-workspaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-workspaces.svg","isIsometric":false,"collection":"aws"},{"id":"aws-x-ray","name":"aws-x-ray","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/aws/aws-x-ray.svg","isIsometric":false,"collection":"aws"},{"id":"_azure_","name":"_azure_","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/_azure_.svg","isIsometric":false,"collection":"azure"},{"id":"azure-aad-licenses","name":"azure-aad-licenses","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-aad-licenses.svg","isIsometric":false,"collection":"azure"},{"id":"azure-abs-member","name":"azure-abs-member","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-abs-member.svg","isIsometric":false,"collection":"azure"},{"id":"azure-acs-solutions-builder","name":"azure-acs-solutions-builder","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-acs-solutions-builder.svg","isIsometric":false,"collection":"azure"},{"id":"azure-active-directory-connect-health","name":"azure-active-directory-connect-health","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-active-directory-connect-health.svg","isIsometric":false,"collect
ion":"azure"},{"id":"azure-active-directory","name":"azure-active-directory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-active-directory.svg","isIsometric":false,"collection":"azure"},{"id":"azure-activity-log","name":"azure-activity-log","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-activity-log.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ad-b2c","name":"azure-ad-b2c","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ad-b2c.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ad-domain-services","name":"azure-ad-domain-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ad-domain-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ad-identity-protection","name":"azure-ad-identity-protection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ad-identity-protection.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ad-privilege-identity-management","name":"azure-ad-privilege-identity-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ad-privilege-identity-management.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ad-roles-and-administrators","name":"azure-ad-roles-and-administrators","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ad-roles-and-administrators.svg","isIsometric":false,"collection":"azure"},{"id":"azure-advisor","name":"azure-advisor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-advisor.svg","isIsometric":false,"collection":"azure"},{"id":"azure-alerts","name":"azure-alerts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-alerts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-all-resources","name":"azure-all-resources","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-
all-resources.svg","isIsometric":false,"collection":"azure"},{"id":"azure-analysis-services","name":"azure-analysis-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-analysis-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-api-connections","name":"azure-api-connections","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-api-connections.svg","isIsometric":false,"collection":"azure"},{"id":"azure-api-for-fhir","name":"azure-api-for-fhir","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-api-for-fhir.svg","isIsometric":false,"collection":"azure"},{"id":"azure-api-management-services","name":"azure-api-management-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-api-management-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-api-proxy","name":"azure-api-proxy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-api-proxy.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-configuration","name":"azure-app-configuration","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-configuration.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-registrations","name":"azure-app-registrations","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-registrations.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-service-certificates","name":"azure-app-service-certificates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-service-certificates.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-service-domains","name":"azure-app-service-domains","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-service-domains.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-service-environments","name":"azure-app-service-en
vironments","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-service-environments.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-service-plans","name":"azure-app-service-plans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-service-plans.svg","isIsometric":false,"collection":"azure"},{"id":"azure-app-services","name":"azure-app-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-app-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-applens","name":"azure-applens","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-applens.svg","isIsometric":false,"collection":"azure"},{"id":"azure-application-gateways","name":"azure-application-gateways","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-application-gateways.svg","isIsometric":false,"collection":"azure"},{"id":"azure-application-insights","name":"azure-application-insights","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-application-insights.svg","isIsometric":false,"collection":"azure"},{"id":"azure-application-security-groups","name":"azure-application-security-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-application-security-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-applied-ai","name":"azure-applied-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-applied-ai.svg","isIsometric":false,"collection":"azure"},{"id":"azure-aquila","name":"azure-aquila","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-aquila.svg","isIsometric":false,"collection":"azure"},{"id":"azure-arc","name":"azure-arc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-arc.svg","isIsometric":false,"collection":"azure"},{"id":"azure-automanaged-vm","name":"azure-automanaged-vm",
"url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-automanaged-vm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-automation-accounts","name":"azure-automation-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-automation-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-availability-sets","name":"azure-availability-sets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-availability-sets.svg","isIsometric":false,"collection":"azure"},{"id":"azure-avs-vm","name":"azure-avs-vm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-avs-vm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-avs","name":"azure-avs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-avs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-azurite","name":"azure-azurite","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-azurite.svg","isIsometric":false,"collection":"azure"},{"id":"azure-backlog","name":"azure-backlog","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-backlog.svg","isIsometric":false,"collection":"azure"},{"id":"azure-backup-center","name":"azure-backup-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-backup-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-bare-metal-infrastructure","name":"azure-bare-metal-infrastructure","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-bare-metal-infrastructure.svg","isIsometric":false,"collection":"azure"},{"id":"azure-bastions","name":"azure-bastions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-bastions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-batch-accounts","name":"azure-batch-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/a
zure-batch-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-batch-ai","name":"azure-batch-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-batch-ai.svg","isIsometric":false,"collection":"azure"},{"id":"azure-biz-talk","name":"azure-biz-talk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-biz-talk.svg","isIsometric":false,"collection":"azure"},{"id":"azure-blob-block","name":"azure-blob-block","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-blob-block.svg","isIsometric":false,"collection":"azure"},{"id":"azure-blob-page","name":"azure-blob-page","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-blob-page.svg","isIsometric":false,"collection":"azure"},{"id":"azure-blockchain-applications","name":"azure-blockchain-applications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-blockchain-applications.svg","isIsometric":false,"collection":"azure"},{"id":"azure-blockchain-service","name":"azure-blockchain-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-blockchain-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-blueprints","name":"azure-blueprints","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-blueprints.svg","isIsometric":false,"collection":"azure"},{"id":"azure-bot-services","name":"azure-bot-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-bot-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-branch","name":"azure-branch","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-branch.svg","isIsometric":false,"collection":"azure"},{"id":"azure-browser","name":"azure-browser","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-browser.svg","isIsometric":false,"collection":"azure"},{"id":"azure-bug","name":"azur
e-bug","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-bug.svg","isIsometric":false,"collection":"azure"},{"id":"azure-builds","name":"azure-builds","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-builds.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cache-redis","name":"azure-cache-redis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cache-redis.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cache","name":"azure-cache","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cache.svg","isIsometric":false,"collection":"azure"},{"id":"azure-capacity","name":"azure-capacity","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-capacity.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cdn-profiles","name":"azure-cdn-profiles","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cdn-profiles.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ceres","name":"azure-ceres","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ceres.svg","isIsometric":false,"collection":"azure"},{"id":"azure-chaos-studio","name":"azure-chaos-studio","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-chaos-studio.svg","isIsometric":false,"collection":"azure"},{"id":"azure-client-apps","name":"azure-client-apps","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-client-apps.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cloud-services-classic","name":"azure-cloud-services-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cloud-services-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cloud-services-extended-support","name":"azure-cloud-services-extended-support","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-clou
d-services-extended-support.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cloud-shell","name":"azure-cloud-shell","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cloud-shell.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cloudtest","name":"azure-cloudtest","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cloudtest.svg","isIsometric":false,"collection":"azure"},{"id":"azure-code","name":"azure-code","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-code.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cognitive-services","name":"azure-cognitive-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cognitive-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-collaborative-service","name":"azure-collaborative-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-collaborative-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-commit","name":"azure-commit","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-commit.svg","isIsometric":false,"collection":"azure"},{"id":"azure-communication-services","name":"azure-communication-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-communication-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-compliance-center","name":"azure-compliance-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-compliance-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-compliance","name":"azure-compliance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-compliance.svg","isIsometric":false,"collection":"azure"},{"id":"azure-compute-galleries","name":"azure-compute-galleries","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-compute-galleri
es.svg","isIsometric":false,"collection":"azure"},{"id":"azure-conditional-access","name":"azure-conditional-access","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-conditional-access.svg","isIsometric":false,"collection":"azure"},{"id":"azure-confidential-ledger","name":"azure-confidential-ledger","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-confidential-ledger.svg","isIsometric":false,"collection":"azure"},{"id":"azure-connected-cache","name":"azure-connected-cache","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-connected-cache.svg","isIsometric":false,"collection":"azure"},{"id":"azure-connected-vehicle-platform","name":"azure-connected-vehicle-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-connected-vehicle-platform.svg","isIsometric":false,"collection":"azure"},{"id":"azure-connections","name":"azure-connections","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-connections.svg","isIsometric":false,"collection":"azure"},{"id":"azure-consortium","name":"azure-consortium","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-consortium.svg","isIsometric":false,"collection":"azure"},{"id":"azure-container-app-environments","name":"azure-container-app-environments","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-container-app-environments.svg","isIsometric":false,"collection":"azure"},{"id":"azure-container-instances","name":"azure-container-instances","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-container-instances.svg","isIsometric":false,"collection":"azure"},{"id":"azure-container-registries","name":"azure-container-registries","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-container-registries.svg","isIsometric":false,"collection":"azure"},{"id":"azure-controls-horizontal","name":"azu
re-controls-horizontal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-controls-horizontal.svg","isIsometric":false,"collection":"azure"},{"id":"azure-controls","name":"azure-controls","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-controls.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cosmos-db","name":"azure-cosmos-db","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cosmos-db.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cost-alerts","name":"azure-cost-alerts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cost-alerts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cost-analysis","name":"azure-cost-analysis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cost-analysis.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cost-budgets","name":"azure-cost-budgets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cost-budgets.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cost-management-and-billing","name":"azure-cost-management-and-billing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cost-management-and-billing.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cost-management","name":"azure-cost-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cost-management.svg","isIsometric":false,"collection":"azure"},{"id":"azure-counter","name":"azure-counter","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-counter.svg","isIsometric":false,"collection":"azure"},{"id":"azure-cubes","name":"azure-cubes","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-cubes.svg","isIsometric":false,"collection":"azure"},{"id":"azure-custom-azure-ad-roles","name":"azure-custom-azure-ad-roles","url":"https://isoflow-p
ublic.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-custom-azure-ad-roles.svg","isIsometric":false,"collection":"azure"},{"id":"azure-custom-ip-prefix","name":"azure-custom-ip-prefix","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-custom-ip-prefix.svg","isIsometric":false,"collection":"azure"},{"id":"azure-customer-lockbox-for-microsoft-azure","name":"azure-customer-lockbox-for-microsoft-azure","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-customer-lockbox-for-microsoft-azure.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dashboard-hub","name":"azure-dashboard-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dashboard-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dashboard","name":"azure-dashboard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dashboard.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-box-edge","name":"azure-data-box-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-box-edge.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-box","name":"azure-data-box","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-box.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-catalog","name":"azure-data-catalog","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-catalog.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-explorer-clusters","name":"azure-data-explorer-clusters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-explorer-clusters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-factory","name":"azure-data-factory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-factory.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-lake-analytics","name":
"azure-data-lake-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-lake-analytics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-lake-storage-gen1","name":"azure-data-lake-storage-gen1","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-lake-storage-gen1.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-lake-store-gen1","name":"azure-data-lake-store-gen1","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-lake-store-gen1.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-share-invitations","name":"azure-data-share-invitations","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-share-invitations.svg","isIsometric":false,"collection":"azure"},{"id":"azure-data-shares","name":"azure-data-shares","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-data-shares.svg","isIsometric":false,"collection":"azure"},{"id":"azure-database-mariadb-server","name":"azure-database-mariadb-server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-database-mariadb-server.svg","isIsometric":false,"collection":"azure"},{"id":"azure-database-migration-services","name":"azure-database-migration-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-database-migration-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-database-mysql-server","name":"azure-database-mysql-server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-database-mysql-server.svg","isIsometric":false,"collection":"azure"},{"id":"azure-database-postgresql-server-group","name":"azure-database-postgresql-server-group","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-database-postgresql-server-group.svg","isIsometric":false,"collection":"azure"},{"id":"azure-database-postgresql-serv
er","name":"azure-database-postgresql-server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-database-postgresql-server.svg","isIsometric":false,"collection":"azure"},{"id":"azure-databricks","name":"azure-databricks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-databricks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ddos-protection-plans","name":"azure-ddos-protection-plans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ddos-protection-plans.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dedicated-hsm","name":"azure-dedicated-hsm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dedicated-hsm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-defender","name":"azure-defender","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-defender.svg","isIsometric":false,"collection":"azure"},{"id":"azure-detonation","name":"azure-detonation","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-detonation.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dev-console","name":"azure-dev-console","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dev-console.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-compliance","name":"azure-device-compliance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-compliance.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-configuration","name":"azure-device-configuration","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-configuration.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-enrollment","name":"azure-device-enrollment","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-enrollment.svg","isIsometric":false,"collection":"azure"},{"id":"a
zure-device-provisioning-services","name":"azure-device-provisioning-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-provisioning-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-security-apple","name":"azure-device-security-apple","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-security-apple.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-security-google","name":"azure-device-security-google","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-security-google.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-security-windows","name":"azure-device-security-windows","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-security-windows.svg","isIsometric":false,"collection":"azure"},{"id":"azure-device-update-iot-hub","name":"azure-device-update-iot-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-device-update-iot-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-devices","name":"azure-devices","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-devices.svg","isIsometric":false,"collection":"azure"},{"id":"azure-devops","name":"azure-devops","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-devops.svg","isIsometric":false,"collection":"azure"},{"id":"azure-devtest-labs","name":"azure-devtest-labs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-devtest-labs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-diagnostics-settings","name":"azure-diagnostics-settings","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-diagnostics-settings.svg","isIsometric":false,"collection":"azure"},{"id":"azure-digital-twins","name":"azure-digital-twins","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/i
sopacks/azure/azure-digital-twins.svg","isIsometric":false,"collection":"azure"},{"id":"azure-disk-encryption-sets","name":"azure-disk-encryption-sets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-disk-encryption-sets.svg","isIsometric":false,"collection":"azure"},{"id":"azure-disk-pool","name":"azure-disk-pool","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-disk-pool.svg","isIsometric":false,"collection":"azure"},{"id":"azure-disks-classic","name":"azure-disks-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-disks-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-disks-snapshots","name":"azure-disks-snapshots","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-disks-snapshots.svg","isIsometric":false,"collection":"azure"},{"id":"azure-disks","name":"azure-disks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-disks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dns-private-resolver","name":"azure-dns-private-resolver","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dns-private-resolver.svg","isIsometric":false,"collection":"azure"},{"id":"azure-dns-zones","name":"azure-dns-zones","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-dns-zones.svg","isIsometric":false,"collection":"azure"},{"id":"azure-download","name":"azure-download","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-download.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ebooks","name":"azure-ebooks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ebooks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-edge-hardware-center","name":"azure-edge-hardware-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-edge-hardware-center.svg","isIsometric":fa
lse,"collection":"azure"},{"id":"azure-edge-management","name":"azure-edge-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-edge-management.svg","isIsometric":false,"collection":"azure"},{"id":"azure-education","name":"azure-education","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-education.svg","isIsometric":false,"collection":"azure"},{"id":"azure-elastic-job-agents","name":"azure-elastic-job-agents","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-elastic-job-agents.svg","isIsometric":false,"collection":"azure"},{"id":"azure-endpoint-analytics","name":"azure-endpoint-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-endpoint-analytics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-enterprise-applications","name":"azure-enterprise-applications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-enterprise-applications.svg","isIsometric":false,"collection":"azure"},{"id":"azure-error","name":"azure-error","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-error.svg","isIsometric":false,"collection":"azure"},{"id":"azure-event-grid-domains","name":"azure-event-grid-domains","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-event-grid-domains.svg","isIsometric":false,"collection":"azure"},{"id":"azure-event-grid-subscriptions","name":"azure-event-grid-subscriptions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-event-grid-subscriptions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-event-grid-topics","name":"azure-event-grid-topics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-event-grid-topics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-event-hub-clusters","name":"azure-event-hub-clusters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.
com/isopacks/azure/azure-event-hub-clusters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-event-hubs","name":"azure-event-hubs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-event-hubs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-exchange-access","name":"azure-exchange-access","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-exchange-access.svg","isIsometric":false,"collection":"azure"},{"id":"azure-exchange-on-premises-access","name":"azure-exchange-on-premises-access","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-exchange-on-premises-access.svg","isIsometric":false,"collection":"azure"},{"id":"azure-experimentation-studio","name":"azure-experimentation-studio","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-experimentation-studio.svg","isIsometric":false,"collection":"azure"},{"id":"azure-expressroute-circuits","name":"azure-expressroute-circuits","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-expressroute-circuits.svg","isIsometric":false,"collection":"azure"},{"id":"azure-expressroute-direct","name":"azure-expressroute-direct","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-expressroute-direct.svg","isIsometric":false,"collection":"azure"},{"id":"azure-extendedsecurityupdates","name":"azure-extendedsecurityupdates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-extendedsecurityupdates.svg","isIsometric":false,"collection":"azure"},{"id":"azure-extensions","name":"azure-extensions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-extensions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-fiji","name":"azure-fiji","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-fiji.svg","isIsometric":false,"collection":"azure"},{"id":"azure-file","name":"azure-file","url
":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-file.svg","isIsometric":false,"collection":"azure"},{"id":"azure-files","name":"azure-files","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-files.svg","isIsometric":false,"collection":"azure"},{"id":"azure-fileshare","name":"azure-fileshare","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-fileshare.svg","isIsometric":false,"collection":"azure"},{"id":"azure-firewall-manager","name":"azure-firewall-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-firewall-manager.svg","isIsometric":false,"collection":"azure"},{"id":"azure-firewalls","name":"azure-firewalls","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-firewalls.svg","isIsometric":false,"collection":"azure"},{"id":"azure-folder-blank","name":"azure-folder-blank","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-folder-blank.svg","isIsometric":false,"collection":"azure"},{"id":"azure-folder-website","name":"azure-folder-website","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-folder-website.svg","isIsometric":false,"collection":"azure"},{"id":"azure-free-services","name":"azure-free-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-free-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-front-doors","name":"azure-front-doors","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-front-doors.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ftp","name":"azure-ftp","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ftp.svg","isIsometric":false,"collection":"azure"},{"id":"azure-function-apps","name":"azure-function-apps","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-function-apps.svg","isIsometric":false,"collection
":"azure"},{"id":"azure-gear","name":"azure-gear","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-gear.svg","isIsometric":false,"collection":"azure"},{"id":"azure-genomics-accounts","name":"azure-genomics-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-genomics-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-genomics","name":"azure-genomics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-genomics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-globe-error","name":"azure-globe-error","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-globe-error.svg","isIsometric":false,"collection":"azure"},{"id":"azure-globe-success","name":"azure-globe-success","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-globe-success.svg","isIsometric":false,"collection":"azure"},{"id":"azure-globe-warning","name":"azure-globe-warning","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-globe-warning.svg","isIsometric":false,"collection":"azure"},{"id":"azure-grafana","name":"azure-grafana","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-grafana.svg","isIsometric":false,"collection":"azure"},{"id":"azure-groups","name":"azure-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-guide","name":"azure-guide","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-guide.svg","isIsometric":false,"collection":"azure"},{"id":"azure-hcp-cache","name":"azure-hcp-cache","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-hcp-cache.svg","isIsometric":false,"collection":"azure"},{"id":"azure-hd-insight-clusters","name":"azure-hd-insight-clusters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure
/azure-hd-insight-clusters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-heart","name":"azure-heart","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-heart.svg","isIsometric":false,"collection":"azure"},{"id":"azure-help-and-support","name":"azure-help-and-support","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-help-and-support.svg","isIsometric":false,"collection":"azure"},{"id":"azure-host-groups","name":"azure-host-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-host-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-hosts","name":"azure-hosts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-hosts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-hpc-workbench","name":"azure-hpc-workbench","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-hpc-workbench.svg","isIsometric":false,"collection":"azure"},{"id":"azure-hybrid-center","name":"azure-hybrid-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-hybrid-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-identity-governance","name":"azure-identity-governance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-identity-governance.svg","isIsometric":false,"collection":"azure"},{"id":"azure-image-definition","name":"azure-image-definition","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image-definition.svg","isIsometric":false,"collection":"azure"},{"id":"azure-image-definitions","name":"azure-image-definitions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image-definitions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-image-templates","name":"azure-image-templates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image-templates.svg","isIsome
tric":false,"collection":"azure"},{"id":"azure-image-version","name":"azure-image-version","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image-version.svg","isIsometric":false,"collection":"azure"},{"id":"azure-image-versions","name":"azure-image-versions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image-versions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-image","name":"azure-image","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-image.svg","isIsometric":false,"collection":"azure"},{"id":"azure-images","name":"azure-images","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-images.svg","isIsometric":false,"collection":"azure"},{"id":"azure-import-export-jobs","name":"azure-import-export-jobs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-import-export-jobs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-industrial-iot","name":"azure-industrial-iot","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-industrial-iot.svg","isIsometric":false,"collection":"azure"},{"id":"azure-information-protection","name":"azure-information-protection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-information-protection.svg","isIsometric":false,"collection":"azure"},{"id":"azure-information","name":"azure-information","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-information.svg","isIsometric":false,"collection":"azure"},{"id":"azure-infrastructure-backup","name":"azure-infrastructure-backup","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-infrastructure-backup.svg","isIsometric":false,"collection":"azure"},{"id":"azure-input-output","name":"azure-input-output","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-input-output.svg","isIsometric":false,"collectio
n":"azure"},{"id":"azure-instance-pools","name":"azure-instance-pools","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-instance-pools.svg","isIsometric":false,"collection":"azure"},{"id":"azure-integration-accounts","name":"azure-integration-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-integration-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-integration-service-environments","name":"azure-integration-service-environments","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-integration-service-environments.svg","isIsometric":false,"collection":"azure"},{"id":"azure-internet-analyzer-profiles","name":"azure-internet-analyzer-profiles","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-internet-analyzer-profiles.svg","isIsometric":false,"collection":"azure"},{"id":"azure-intune-app-protection","name":"azure-intune-app-protection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-intune-app-protection.svg","isIsometric":false,"collection":"azure"},{"id":"azure-intune-for-education","name":"azure-intune-for-education","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-intune-for-education.svg","isIsometric":false,"collection":"azure"},{"id":"azure-intune-trends","name":"azure-intune-trends","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-intune-trends.svg","isIsometric":false,"collection":"azure"},{"id":"azure-intune","name":"azure-intune","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-intune.svg","isIsometric":false,"collection":"azure"},{"id":"azure-iot-central-applications","name":"azure-iot-central-applications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-iot-central-applications.svg","isIsometric":false,"collection":"azure"},{"id":"azure-iot-edge","name":"azure-iot-edge",
"url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-iot-edge.svg","isIsometric":false,"collection":"azure"},{"id":"azure-iot-hub","name":"azure-iot-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-iot-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ip-groups","name":"azure-ip-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ip-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-journey-hub","name":"azure-journey-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-journey-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-key-vaults","name":"azure-key-vaults","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-key-vaults.svg","isIsometric":false,"collection":"azure"},{"id":"azure-keys","name":"azure-keys","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-keys.svg","isIsometric":false,"collection":"azure"},{"id":"azure-kubernetes-services","name":"azure-kubernetes-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-kubernetes-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-lab-services","name":"azure-lab-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-lab-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-language-services","name":"azure-language-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-language-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-launch-portal","name":"azure-launch-portal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-launch-portal.svg","isIsometric":false,"collection":"azure"},{"id":"azure-learn","name":"azure-learn","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-learn.svg","isIsometric":fal
se,"collection":"azure"},{"id":"azure-lighthouse","name":"azure-lighthouse","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-lighthouse.svg","isIsometric":false,"collection":"azure"},{"id":"azure-load-balancer-hub","name":"azure-load-balancer-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-load-balancer-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-load-balancers","name":"azure-load-balancers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-load-balancers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-load-test","name":"azure-load-test","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-load-test.svg","isIsometric":false,"collection":"azure"},{"id":"azure-load-testing","name":"azure-load-testing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-load-testing.svg","isIsometric":false,"collection":"azure"},{"id":"azure-local-network-gateways","name":"azure-local-network-gateways","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-local-network-gateways.svg","isIsometric":false,"collection":"azure"},{"id":"azure-location","name":"azure-location","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-location.svg","isIsometric":false,"collection":"azure"},{"id":"azure-log-analytics-workspaces","name":"azure-log-analytics-workspaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-log-analytics-workspaces.svg","isIsometric":false,"collection":"azure"},{"id":"azure-log-streaming","name":"azure-log-streaming","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-log-streaming.svg","isIsometric":false,"collection":"azure"},{"id":"azure-logic-apps-custom-connector","name":"azure-logic-apps-custom-connector","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-logic-apps-c
ustom-connector.svg","isIsometric":false,"collection":"azure"},{"id":"azure-logic-apps","name":"azure-logic-apps","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-logic-apps.svg","isIsometric":false,"collection":"azure"},{"id":"azure-machine-learning-studio-classic-web-services","name":"azure-machine-learning-studio-classic-web-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-machine-learning-studio-classic-web-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-machine-learning-studio-web-service-plans","name":"azure-machine-learning-studio-web-service-plans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-machine-learning-studio-web-service-plans.svg","isIsometric":false,"collection":"azure"},{"id":"azure-machine-learning-studio-workspaces","name":"azure-machine-learning-studio-workspaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-machine-learning-studio-workspaces.svg","isIsometric":false,"collection":"azure"},{"id":"azure-machine-learning","name":"azure-machine-learning","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-machine-learning.svg","isIsometric":false,"collection":"azure"},{"id":"azure-machinesazurearc","name":"azure-machinesazurearc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-machinesazurearc.svg","isIsometric":false,"collection":"azure"},{"id":"azure-maintenance-configuration","name":"azure-maintenance-configuration","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-maintenance-configuration.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-applications-center","name":"azure-managed-applications-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-applications-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-database","name":"azu
re-managed-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-database.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-desktop","name":"azure-managed-desktop","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-desktop.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-identities","name":"azure-managed-identities","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-identities.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-instance-apache-cassandra","name":"azure-managed-instance-apache-cassandra","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-instance-apache-cassandra.svg","isIsometric":false,"collection":"azure"},{"id":"azure-managed-service-fabric","name":"azure-managed-service-fabric","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-managed-service-fabric.svg","isIsometric":false,"collection":"azure"},{"id":"azure-management-groups","name":"azure-management-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-management-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-management-portal","name":"azure-management-portal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-management-portal.svg","isIsometric":false,"collection":"azure"},{"id":"azure-maps-accounts","name":"azure-maps-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-maps-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-marketplace-management","name":"azure-marketplace-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-marketplace-management.svg","isIsometric":false,"collection":"azure"},{"id":"azure-marketplace","name":"azure-marketplace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/
isopacks/azure/azure-marketplace.svg","isIsometric":false,"collection":"azure"},{"id":"azure-media-file","name":"azure-media-file","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-media-file.svg","isIsometric":false,"collection":"azure"},{"id":"azure-media-service","name":"azure-media-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-media-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-media","name":"azure-media","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-media.svg","isIsometric":false,"collection":"azure"},{"id":"azure-mesh-applications","name":"azure-mesh-applications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-mesh-applications.svg","isIsometric":false,"collection":"azure"},{"id":"azure-metrics-advisor","name":"azure-metrics-advisor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-metrics-advisor.svg","isIsometric":false,"collection":"azure"},{"id":"azure-metrics","name":"azure-metrics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-metrics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-migrate","name":"azure-migrate","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-migrate.svg","isIsometric":false,"collection":"azure"},{"id":"azure-mindaro","name":"azure-mindaro","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-mindaro.svg","isIsometric":false,"collection":"azure"},{"id":"azure-mission-landing-zone","name":"azure-mission-landing-zone","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-mission-landing-zone.svg","isIsometric":false,"collection":"azure"},{"id":"azure-mobile-engagement","name":"azure-mobile-engagement","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-mobile-engagement.svg","isIsometric":false,"collection":"azur
e"},{"id":"azure-mobile","name":"azure-mobile","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-mobile.svg","isIsometric":false,"collection":"azure"},{"id":"azure-modular-data-center","name":"azure-modular-data-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-modular-data-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-module","name":"azure-module","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-module.svg","isIsometric":false,"collection":"azure"},{"id":"azure-monitor-dashboard","name":"azure-monitor-dashboard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-monitor-dashboard.svg","isIsometric":false,"collection":"azure"},{"id":"azure-monitor","name":"azure-monitor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-monitor.svg","isIsometric":false,"collection":"azure"},{"id":"azure-multi-tenancy","name":"azure-multi-tenancy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-multi-tenancy.svg","isIsometric":false,"collection":"azure"},{"id":"azure-my-customers","name":"azure-my-customers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-my-customers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-nat","name":"azure-nat","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-nat.svg","isIsometric":false,"collection":"azure"},{"id":"azure-netapp-files","name":"azure-netapp-files","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-netapp-files.svg","isIsometric":false,"collection":"azure"},{"id":"azure-network-function-manager","name":"azure-network-function-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-network-function-manager.svg","isIsometric":false,"collection":"azure"},{"id":"azure-network-interfaces","name":"azure-network-interfaces","u
rl":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-network-interfaces.svg","isIsometric":false,"collection":"azure"},{"id":"azure-network-manager","name":"azure-network-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-network-manager.svg","isIsometric":false,"collection":"azure"},{"id":"azure-network-security-groups","name":"azure-network-security-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-network-security-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-network-watcher","name":"azure-network-watcher","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-network-watcher.svg","isIsometric":false,"collection":"azure"},{"id":"azure-notification-hub-namespaces","name":"azure-notification-hub-namespaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-notification-hub-namespaces.svg","isIsometric":false,"collection":"azure"},{"id":"azure-notification-hubs","name":"azure-notification-hubs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-notification-hubs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-object-understanding","name":"azure-object-understanding","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-object-understanding.svg","isIsometric":false,"collection":"azure"},{"id":"azure-offers","name":"azure-offers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-offers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-on-premises-data-gateways","name":"azure-on-premises-data-gateways","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-on-premises-data-gateways.svg","isIsometric":false,"collection":"azure"},{"id":"azure-open-supply-chain-platform","name":"azure-open-supply-chain-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azu
re-open-supply-chain-platform.svg","isIsometric":false,"collection":"azure"},{"id":"azure-operation-log-classic","name":"azure-operation-log-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-operation-log-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-orbital","name":"azure-orbital","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-orbital.svg","isIsometric":false,"collection":"azure"},{"id":"azure-os-images-classic","name":"azure-os-images-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-os-images-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-outbound-connection","name":"azure-outbound-connection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-outbound-connection.svg","isIsometric":false,"collection":"azure"},{"id":"azure-partner-namespace","name":"azure-partner-namespace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-partner-namespace.svg","isIsometric":false,"collection":"azure"},{"id":"azure-partner-registration","name":"azure-partner-registration","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-partner-registration.svg","isIsometric":false,"collection":"azure"},{"id":"azure-partner-topic","name":"azure-partner-topic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-partner-topic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-peering-service","name":"azure-peering-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-peering-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-pim","name":"azure-pim","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-pim.svg","isIsometric":false,"collection":"azure"},{"id":"azure-plans","name":"azure-plans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/a
zure-plans.svg","isIsometric":false,"collection":"azure"},{"id":"azure-policy","name":"azure-policy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-policy.svg","isIsometric":false,"collection":"azure"},{"id":"azure-power-up","name":"azure-power-up","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-power-up.svg","isIsometric":false,"collection":"azure"},{"id":"azure-power","name":"azure-power","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-power.svg","isIsometric":false,"collection":"azure"},{"id":"azure-powershell","name":"azure-powershell","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-powershell.svg","isIsometric":false,"collection":"azure"},{"id":"azure-preview-features","name":"azure-preview-features","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-preview-features.svg","isIsometric":false,"collection":"azure"},{"id":"azure-preview","name":"azure-preview","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-preview.svg","isIsometric":false,"collection":"azure"},{"id":"azure-private-link-hub","name":"azure-private-link-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-private-link-hub.svg","isIsometric":false,"collection":"azure"},{"id":"azure-private-link-service","name":"azure-private-link-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-private-link-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-private-link","name":"azure-private-link","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-private-link.svg","isIsometric":false,"collection":"azure"},{"id":"azure-private-mobile-network","name":"azure-private-mobile-network","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-private-mobile-network.svg","isIsometric":false,"collection":"azure"},{"id":
"azure-process-explorer","name":"azure-process-explorer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-process-explorer.svg","isIsometric":false,"collection":"azure"},{"id":"azure-production-ready-database","name":"azure-production-ready-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-production-ready-database.svg","isIsometric":false,"collection":"azure"},{"id":"azure-proximity-placement-groups","name":"azure-proximity-placement-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-proximity-placement-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-public-ip-addresses-classic","name":"azure-public-ip-addresses-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-public-ip-addresses-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-public-ip-addresses","name":"azure-public-ip-addresses","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-public-ip-addresses.svg","isIsometric":false,"collection":"azure"},{"id":"azure-public-ip-prefixes","name":"azure-public-ip-prefixes","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-public-ip-prefixes.svg","isIsometric":false,"collection":"azure"},{"id":"azure-purview-accounts","name":"azure-purview-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-purview-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-quickstart-center","name":"azure-quickstart-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-quickstart-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-quotas","name":"azure-quotas","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-quotas.svg","isIsometric":false,"collection":"azure"},{"id":"azure-recent","name":"azure-recent","url":"https://isoflow-public.s3.eu-west
-2.amazonaws.com/isopacks/azure/azure-recent.svg","isIsometric":false,"collection":"azure"},{"id":"azure-recovery-services-vaults","name":"azure-recovery-services-vaults","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-recovery-services-vaults.svg","isIsometric":false,"collection":"azure"},{"id":"azure-region-management","name":"azure-region-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-region-management.svg","isIsometric":false,"collection":"azure"},{"id":"azure-relays","name":"azure-relays","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-relays.svg","isIsometric":false,"collection":"azure"},{"id":"azure-remote-rendering","name":"azure-remote-rendering","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-remote-rendering.svg","isIsometric":false,"collection":"azure"},{"id":"azure-reservations","name":"azure-reservations","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-reservations.svg","isIsometric":false,"collection":"azure"},{"id":"azure-reserved-capacity-groups","name":"azure-reserved-capacity-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-reserved-capacity-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-reserved-capacity","name":"azure-reserved-capacity","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-reserved-capacity.svg","isIsometric":false,"collection":"azure"},{"id":"azure-reserved-ip-addresses-classic","name":"azure-reserved-ip-addresses-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-reserved-ip-addresses-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-explorer","name":"azure-resource-explorer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-explorer.svg","isIsometric":false,"collection":"azure"},{"id":"az
ure-resource-graph-explorer","name":"azure-resource-graph-explorer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-graph-explorer.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-group-list","name":"azure-resource-group-list","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-group-list.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-groups","name":"azure-resource-groups","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-groups.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-linked","name":"azure-resource-linked","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-linked.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-management-private-link","name":"azure-resource-management-private-link","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-management-private-link.svg","isIsometric":false,"collection":"azure"},{"id":"azure-resource-mover","name":"azure-resource-mover","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-resource-mover.svg","isIsometric":false,"collection":"azure"},{"id":"azure-restore-points-collections","name":"azure-restore-points-collections","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-restore-points-collections.svg","isIsometric":false,"collection":"azure"},{"id":"azure-restore-points","name":"azure-restore-points","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-restore-points.svg","isIsometric":false,"collection":"azure"},{"id":"azure-route-filters","name":"azure-route-filters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-route-filters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-route-tables","name":"azure-route-tables","url":"https://isoflow-publi
c.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-route-tables.svg","isIsometric":false,"collection":"azure"},{"id":"azure-rtos","name":"azure-rtos","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-rtos.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sap-azure-monitor","name":"azure-sap-azure-monitor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sap-azure-monitor.svg","isIsometric":false,"collection":"azure"},{"id":"azure-savings-plan","name":"azure-savings-plan","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-savings-plan.svg","isIsometric":false,"collection":"azure"},{"id":"azure-scale","name":"azure-scale","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-scale.svg","isIsometric":false,"collection":"azure"},{"id":"azure-scheduler-job-collections","name":"azure-scheduler-job-collections","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-scheduler-job-collections.svg","isIsometric":false,"collection":"azure"},{"id":"azure-scheduler","name":"azure-scheduler","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-scheduler.svg","isIsometric":false,"collection":"azure"},{"id":"azure-search-grid","name":"azure-search-grid","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-search-grid.svg","isIsometric":false,"collection":"azure"},{"id":"azure-search-services","name":"azure-search-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-search-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-search","name":"azure-search","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-search.svg","isIsometric":false,"collection":"azure"},{"id":"azure-security-baselines","name":"azure-security-baselines","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-security-baselines.s
vg","isIsometric":false,"collection":"azure"},{"id":"azure-security-center","name":"azure-security-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-security-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sendgrid-accounts","name":"azure-sendgrid-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sendgrid-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sentinel","name":"azure-sentinel","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sentinel.svg","isIsometric":false,"collection":"azure"},{"id":"azure-server-farm","name":"azure-server-farm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-server-farm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-bus","name":"azure-service-bus","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-service-bus.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-catalog-mad","name":"azure-service-catalog-mad","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-service-catalog-mad.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-endpoint-policies","name":"azure-service-endpoint-policies","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-service-endpoint-policies.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-fabric-clusters","name":"azure-service-fabric-clusters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-service-fabric-clusters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-health","name":"azure-service-health","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-service-health.svg","isIsometric":false,"collection":"azure"},{"id":"azure-service-providers","name":"azure-service-providers","url":"https://isoflow-public.s3.eu-west-2.amaz
onaws.com/isopacks/azure/azure-service-providers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-shared-image-galleries","name":"azure-shared-image-galleries","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-shared-image-galleries.svg","isIsometric":false,"collection":"azure"},{"id":"azure-signalr","name":"azure-signalr","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-signalr.svg","isIsometric":false,"collection":"azure"},{"id":"azure-software-as-a-service","name":"azure-software-as-a-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-software-as-a-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-software-updates","name":"azure-software-updates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-software-updates.svg","isIsometric":false,"collection":"azure"},{"id":"azure-solutions","name":"azure-solutions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-solutions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sonic-dash","name":"azure-sonic-dash","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sonic-dash.svg","isIsometric":false,"collection":"azure"},{"id":"azure-spatial-anchor-accounts","name":"azure-spatial-anchor-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-spatial-anchor-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sphere","name":"azure-sphere","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sphere.svg","isIsometric":false,"collection":"azure"},{"id":"azure-spot-vm","name":"azure-spot-vm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-spot-vm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-spot-vmss","name":"azure-spot-vmss","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-s
pot-vmss.svg","isIsometric":false,"collection":"azure"},{"id":"azure-spring-cloud","name":"azure-spring-cloud","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-spring-cloud.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-data-warehouses","name":"azure-sql-data-warehouses","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-data-warehouses.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-database","name":"azure-sql-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-database.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-edge","name":"azure-sql-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-edge.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-elastic-pools","name":"azure-sql-elastic-pools","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-elastic-pools.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-managed-instance","name":"azure-sql-managed-instance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-managed-instance.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-server-registries","name":"azure-sql-server-registries","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-server-registries.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-server-stretch-databases","name":"azure-sql-server-stretch-databases","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-server-stretch-databases.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-server","name":"azure-sql-server","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql-server.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql-vm","name":"azure-sql-vm","url":"https://isoflow-public.s3.eu-west-2.amaz
onaws.com/isopacks/azure/azure-sql-vm.svg","isIsometric":false,"collection":"azure"},{"id":"azure-sql","name":"azure-sql","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-sql.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ssd","name":"azure-ssd","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ssd.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ssh-keys","name":"azure-ssh-keys","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ssh-keys.svg","isIsometric":false,"collection":"azure"},{"id":"azure-ssis-lift-and-shift-ir","name":"azure-ssis-lift-and-shift-ir","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-ssis-lift-and-shift-ir.svg","isIsometric":false,"collection":"azure"},{"id":"azure-stack-edge","name":"azure-stack-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-stack-edge.svg","isIsometric":false,"collection":"azure"},{"id":"azure-stack","name":"azure-stack","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-stack.svg","isIsometric":false,"collection":"azure"},{"id":"azure-static-apps","name":"azure-static-apps","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-static-apps.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-accounts-classic","name":"azure-storage-accounts-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-accounts-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-accounts","name":"azure-storage-accounts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-accounts.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-azure-files","name":"azure-storage-azure-files","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-azure-files.svg","isIsometric":false,"col
lection":"azure"},{"id":"azure-storage-container","name":"azure-storage-container","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-container.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-explorer","name":"azure-storage-explorer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-explorer.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-queue","name":"azure-storage-queue","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-queue.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-sync-services","name":"azure-storage-sync-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-sync-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storage-tasks","name":"azure-storage-tasks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storage-tasks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storsimple-data-managers","name":"azure-storsimple-data-managers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storsimple-data-managers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-storsimple-device-managers","name":"azure-storsimple-device-managers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-storsimple-device-managers.svg","isIsometric":false,"collection":"azure"},{"id":"azure-stream-analytics-jobs","name":"azure-stream-analytics-jobs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-stream-analytics-jobs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-subscriptions","name":"azure-subscriptions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-subscriptions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-support-center-blue","name":"azure-support-center-blue","url":"h
ttps://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-support-center-blue.svg","isIsometric":false,"collection":"azure"},{"id":"azure-synapse-analytics","name":"azure-synapse-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-synapse-analytics.svg","isIsometric":false,"collection":"azure"},{"id":"azure-system-topic","name":"azure-system-topic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-system-topic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-table","name":"azure-table","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-table.svg","isIsometric":false,"collection":"azure"},{"id":"azure-tag","name":"azure-tag","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-tag.svg","isIsometric":false,"collection":"azure"},{"id":"azure-tags","name":"azure-tags","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-tags.svg","isIsometric":false,"collection":"azure"},{"id":"azure-template-specs","name":"azure-template-specs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-template-specs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-templates","name":"azure-templates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-templates.svg","isIsometric":false,"collection":"azure"},{"id":"azure-tenant-properties","name":"azure-tenant-properties","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-tenant-properties.svg","isIsometric":false,"collection":"azure"},{"id":"azure-tenant-status","name":"azure-tenant-status","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-tenant-status.svg","isIsometric":false,"collection":"azure"},{"id":"azure-test-base","name":"azure-test-base","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-test-base.svg","isIsometric":false,"colle
ction":"azure"},{"id":"azure-tfs-vc-repository","name":"azure-tfs-vc-repository","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-tfs-vc-repository.svg","isIsometric":false,"collection":"azure"},{"id":"azure-time-series-data-sets","name":"azure-time-series-data-sets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-time-series-data-sets.svg","isIsometric":false,"collection":"azure"},{"id":"azure-time-series-insights-access-policies","name":"azure-time-series-insights-access-policies","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-time-series-insights-access-policies.svg","isIsometric":false,"collection":"azure"},{"id":"azure-time-series-insights-environments","name":"azure-time-series-insights-environments","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-time-series-insights-environments.svg","isIsometric":false,"collection":"azure"},{"id":"azure-time-series-insights-event-sources","name":"azure-time-series-insights-event-sources","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-time-series-insights-event-sources.svg","isIsometric":false,"collection":"azure"},{"id":"azure-token-service","name":"azure-token-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-token-service.svg","isIsometric":false,"collection":"azure"},{"id":"azure-toolbox","name":"azure-toolbox","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-toolbox.svg","isIsometric":false,"collection":"azure"},{"id":"azure-traffic-manager-profiles","name":"azure-traffic-manager-profiles","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-traffic-manager-profiles.svg","isIsometric":false,"collection":"azure"},{"id":"azure-translator-text","name":"azure-translator-text","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-translator-text.svg","isIsometric"
:false,"collection":"azure"},{"id":"azure-troubleshoot","name":"azure-troubleshoot","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-troubleshoot.svg","isIsometric":false,"collection":"azure"},{"id":"azure-universal-print","name":"azure-universal-print","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-universal-print.svg","isIsometric":false,"collection":"azure"},{"id":"azure-update-center","name":"azure-update-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-update-center.svg","isIsometric":false,"collection":"azure"},{"id":"azure-updates","name":"azure-updates","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-updates.svg","isIsometric":false,"collection":"azure"},{"id":"azure-user-privacy","name":"azure-user-privacy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-user-privacy.svg","isIsometric":false,"collection":"azure"},{"id":"azure-user-subscriptions","name":"azure-user-subscriptions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-user-subscriptions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-users","name":"azure-users","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-users.svg","isIsometric":false,"collection":"azure"},{"id":"azure-verifiable-credentials","name":"azure-verifiable-credentials","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-verifiable-credentials.svg","isIsometric":false,"collection":"azure"},{"id":"azure-versions","name":"azure-versions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-versions.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-clusters","name":"azure-virtual-clusters","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-clusters.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virt
ual-machine","name":"azure-virtual-machine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-machine.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-machines-classic","name":"azure-virtual-machines-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-machines-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-network-gateways","name":"azure-virtual-network-gateways","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-network-gateways.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-networks-classic","name":"azure-virtual-networks-classic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-networks-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-networks","name":"azure-virtual-networks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-networks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-router","name":"azure-virtual-router","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-router.svg","isIsometric":false,"collection":"azure"},{"id":"azure-virtual-wans","name":"azure-virtual-wans","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-virtual-wans.svg","isIsometric":false,"collection":"azure"},{"id":"azure-vm-application-definition","name":"azure-vm-application-definition","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-vm-application-definition.svg","isIsometric":false,"collection":"azure"},{"id":"azure-vm-application-version","name":"azure-vm-application-version","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-vm-application-version.svg","isIsometric":false,"collection":"azure"},{"id":"azure-vm-images-classic","name":"azure-vm-images-classic","url":"http
s://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-vm-images-classic.svg","isIsometric":false,"collection":"azure"},{"id":"azure-vm-scale-sets","name":"azure-vm-scale-sets","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-vm-scale-sets.svg","isIsometric":false,"collection":"azure"},{"id":"azure-wac","name":"azure-wac","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-wac.svg","isIsometric":false,"collection":"azure"},{"id":"azure-web-app-database","name":"azure-web-app-database","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-web-app-database.svg","isIsometric":false,"collection":"azure"},{"id":"azure-web-application-firewall-policieswaf","name":"azure-web-application-firewall-policieswaf","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-web-application-firewall-policieswaf.svg","isIsometric":false,"collection":"azure"},{"id":"azure-web-jobs","name":"azure-web-jobs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-web-jobs.svg","isIsometric":false,"collection":"azure"},{"id":"azure-web-slots","name":"azure-web-slots","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-web-slots.svg","isIsometric":false,"collection":"azure"},{"id":"azure-web-test","name":"azure-web-test","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-web-test.svg","isIsometric":false,"collection":"azure"},{"id":"azure-website-power","name":"azure-website-power","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-website-power.svg","isIsometric":false,"collection":"azure"},{"id":"azure-website-staging","name":"azure-website-staging","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-website-staging.svg","isIsometric":false,"collection":"azure"},{"id":"azure-windows-notification-services","name":"azure-windows-notification-services","ur
l":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-windows-notification-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-windows-virtual-desktop","name":"azure-windows-virtual-desktop","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-windows-virtual-desktop.svg","isIsometric":false,"collection":"azure"},{"id":"azure-windows10-core-services","name":"azure-windows10-core-services","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-windows10-core-services.svg","isIsometric":false,"collection":"azure"},{"id":"azure-workbooks","name":"azure-workbooks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-workbooks.svg","isIsometric":false,"collection":"azure"},{"id":"azure-worker-container-app","name":"azure-worker-container-app","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-worker-container-app.svg","isIsometric":false,"collection":"azure"},{"id":"azure-workflow","name":"azure-workflow","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-workflow.svg","isIsometric":false,"collection":"azure"},{"id":"azure-workspaces","name":"azure-workspaces","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azure-workspaces.svg","isIsometric":false,"collection":"azure"},{"id":"azureattestation","name":"azureattestation","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/azure/azureattestation.svg","isIsometric":false,"collection":"azure"},{"id":"_gcp_","name":"_gcp_","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/_gcp_.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-access-context-manager","name":"gcp-access-context-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-access-context-manager.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-administration","name":"gcp-administration","url":"https://isoflow
-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-administration.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-advanced-agent-modeling","name":"gcp-advanced-agent-modeling","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-advanced-agent-modeling.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-advanced-solutions-lab","name":"gcp-advanced-solutions-lab","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-advanced-solutions-lab.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-agent-assist","name":"gcp-agent-assist","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-agent-assist.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-ai-hub","name":"gcp-ai-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-ai-hub.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-ai-platform-unified","name":"gcp-ai-platform-unified","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-ai-platform-unified.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-ai-platform","name":"gcp-ai-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-ai-platform.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-analytics-hub","name":"gcp-analytics-hub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-analytics-hub.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-anthos-config-management","name":"gcp-anthos-config-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-anthos-config-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-anthos-service-mesh","name":"gcp-anthos-service-mesh","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-anthos-service-mesh.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-anthos","name":"gcp-anthos","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-antho
s.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-api-analytics","name":"gcp-api-analytics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-api-analytics.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-api-monetization","name":"gcp-api-monetization","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-api-monetization.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-api","name":"gcp-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-apigee-api-platform","name":"gcp-apigee-api-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-apigee-api-platform.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-apigee-sense","name":"gcp-apigee-sense","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-apigee-sense.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-app-engine","name":"gcp-app-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-app-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-artifact-registry","name":"gcp-artifact-registry","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-artifact-registry.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-asset-inventory","name":"gcp-asset-inventory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-asset-inventory.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-assured-workloads","name":"gcp-assured-workloads","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-assured-workloads.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl-natural-language","name":"gcp-automl-natural-language","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl-natural-language.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl-tables","name":"gcp-automl-tab
les","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl-tables.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl-translation","name":"gcp-automl-translation","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl-translation.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl-video-intelligence","name":"gcp-automl-video-intelligence","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl-video-intelligence.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl-vision","name":"gcp-automl-vision","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl-vision.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-automl","name":"gcp-automl","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-automl.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-bare-metal-solutions","name":"gcp-bare-metal-solutions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-bare-metal-solutions.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-batch","name":"gcp-batch","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-batch.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-beyondcorp","name":"gcp-beyondcorp","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-beyondcorp.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-bigquery","name":"gcp-bigquery","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-bigquery.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-bigtable","name":"gcp-bigtable","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-bigtable.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-billing","name":"gcp-billing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-billing.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-binary-authorization"
,"name":"gcp-binary-authorization","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-binary-authorization.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-catalog","name":"gcp-catalog","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-catalog.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-certificate-authority-service","name":"gcp-certificate-authority-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-certificate-authority-service.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-certificate-manager","name":"gcp-certificate-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-certificate-manager.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-api-gateway","name":"gcp-cloud-api-gateway","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-api-gateway.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-apis","name":"gcp-cloud-apis","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-apis.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-armor","name":"gcp-cloud-armor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-armor.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-asset-inventory","name":"gcp-cloud-asset-inventory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-asset-inventory.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-audit-logs","name":"gcp-cloud-audit-logs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-audit-logs.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-build","name":"gcp-cloud-build","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-build.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-cdn","name":"gcp-cloud-cdn","url":"https://isoflow-public.s3.e
u-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-cdn.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-code","name":"gcp-cloud-code","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-code.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-composer","name":"gcp-cloud-composer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-composer.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-data-fusion","name":"gcp-cloud-data-fusion","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-data-fusion.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-deploy","name":"gcp-cloud-deploy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-deploy.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-deployment-manager","name":"gcp-cloud-deployment-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-deployment-manager.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-dns","name":"gcp-cloud-dns","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-dns.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-domains","name":"gcp-cloud-domains","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-domains.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-ekm","name":"gcp-cloud-ekm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-ekm.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-endpoints","name":"gcp-cloud-endpoints","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-endpoints.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-external-ip-addresses","name":"gcp-cloud-external-ip-addresses","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-external-ip-addresses.svg","isIsometric":false,"coll
ection":"gcp"},{"id":"gcp-cloud-firewall-rules","name":"gcp-cloud-firewall-rules","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-firewall-rules.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-for-marketing","name":"gcp-cloud-for-marketing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-for-marketing.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-functions","name":"gcp-cloud-functions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-functions.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-generic","name":"gcp-cloud-generic","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-generic.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-gpu","name":"gcp-cloud-gpu","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-gpu.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-healthcare-api","name":"gcp-cloud-healthcare-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-healthcare-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-healthcare-marketplace","name":"gcp-cloud-healthcare-marketplace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-healthcare-marketplace.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-hsm","name":"gcp-cloud-hsm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-hsm.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-ids","name":"gcp-cloud-ids","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-ids.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-inference-api","name":"gcp-cloud-inference-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-inference-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-interconnect","
name":"gcp-cloud-interconnect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-interconnect.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-jobs-api","name":"gcp-cloud-jobs-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-jobs-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-load-balancing","name":"gcp-cloud-load-balancing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-load-balancing.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-logging","name":"gcp-cloud-logging","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-logging.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-media-edge","name":"gcp-cloud-media-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-media-edge.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-monitoring","name":"gcp-cloud-monitoring","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-monitoring.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-nat","name":"gcp-cloud-nat","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-nat.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-natural-language-api","name":"gcp-cloud-natural-language-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-natural-language-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-network","name":"gcp-cloud-network","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-network.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-ops","name":"gcp-cloud-ops","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-ops.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-optimization-ai---fleet-routing-api","name":"gcp-cloud-optimization-ai---fleet-rout
ing-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-optimization-ai---fleet-routing-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-optimization-ai","name":"gcp-cloud-optimization-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-optimization-ai.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-router","name":"gcp-cloud-router","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-router.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-routes","name":"gcp-cloud-routes","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-routes.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-run-for-anthos","name":"gcp-cloud-run-for-anthos","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-run-for-anthos.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-run","name":"gcp-cloud-run","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-run.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-scheduler","name":"gcp-cloud-scheduler","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-scheduler.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-security-scanner","name":"gcp-cloud-security-scanner","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-security-scanner.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-shell","name":"gcp-cloud-shell","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-shell.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-spanner","name":"gcp-cloud-spanner","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-spanner.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-sql","name":"gcp-cloud-sql","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/
isopacks/gcp/gcp-cloud-sql.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-storage","name":"gcp-cloud-storage","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-storage.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-tasks","name":"gcp-cloud-tasks","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-tasks.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-test-lab","name":"gcp-cloud-test-lab","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-test-lab.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-tpu","name":"gcp-cloud-tpu","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-tpu.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-translation-api","name":"gcp-cloud-translation-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-translation-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-vision-api","name":"gcp-cloud-vision-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-vision-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-cloud-vpn","name":"gcp-cloud-vpn","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-cloud-vpn.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-compute-engine","name":"gcp-compute-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-compute-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-configuration-management","name":"gcp-configuration-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-configuration-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-connectivity-test","name":"gcp-connectivity-test","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-connectivity-test.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-con
nectors","name":"gcp-connectors","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-connectors.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-contact-center-ai","name":"gcp-contact-center-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-contact-center-ai.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-container-optimized-os","name":"gcp-container-optimized-os","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-container-optimized-os.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-container-registry","name":"gcp-container-registry","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-container-registry.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-catalog","name":"gcp-data-catalog","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-catalog.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-labeling","name":"gcp-data-labeling","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-labeling.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-layers","name":"gcp-data-layers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-layers.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-loss-prevention-api","name":"gcp-data-loss-prevention-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-loss-prevention-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-qna","name":"gcp-data-qna","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-qna.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-studio","name":"gcp-data-studio","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-data-studio.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-data-transfer","name":"gcp-data-transfer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.
com/isopacks/gcp/gcp-data-transfer.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-database-migration-service","name":"gcp-database-migration-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-database-migration-service.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dataflow","name":"gcp-dataflow","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dataflow.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-datalab","name":"gcp-datalab","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-datalab.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dataplex","name":"gcp-dataplex","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dataplex.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-datapol","name":"gcp-datapol","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-datapol.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dataprep","name":"gcp-dataprep","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dataprep.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dataproc-metastore","name":"gcp-dataproc-metastore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dataproc-metastore.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dataproc","name":"gcp-dataproc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dataproc.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-datashare","name":"gcp-datashare","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-datashare.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-datastore","name":"gcp-datastore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-datastore.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-datastream","name":"gcp-datastream","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-datas
tream.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-debugger","name":"gcp-debugger","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-debugger.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-developer-portal","name":"gcp-developer-portal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-developer-portal.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dialogflow-cx","name":"gcp-dialogflow-cx","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dialogflow-cx.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dialogflow-insights","name":"gcp-dialogflow-insights","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dialogflow-insights.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-dialogflow","name":"gcp-dialogflow","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-dialogflow.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-document-ai","name":"gcp-document-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-document-ai.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-early-access-center","name":"gcp-early-access-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-early-access-center.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-error-reporting","name":"gcp-error-reporting","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-error-reporting.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-eventarc","name":"gcp-eventarc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-eventarc.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-filestore","name":"gcp-filestore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-filestore.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-financial-services-marketplace","name":"gcp-financial-services-marketplace","url":"h
ttps://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-financial-services-marketplace.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-firestore","name":"gcp-firestore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-firestore.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-fleet-engine","name":"gcp-fleet-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-fleet-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-free-trial","name":"gcp-free-trial","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-free-trial.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-game-servers","name":"gcp-game-servers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-game-servers.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-gce-systems-management","name":"gcp-gce-systems-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-gce-systems-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-genomics","name":"gcp-genomics","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-genomics.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-gke-on-prem","name":"gcp-gke-on-prem","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-gke-on-prem.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-google-cloud-marketplace","name":"gcp-google-cloud-marketplace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-google-cloud-marketplace.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-google-kubernetes-engine","name":"gcp-google-kubernetes-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-google-kubernetes-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-google-maps-platform","name":"gcp-google-maps-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gc
p-google-maps-platform.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-healthcare-nlp-api","name":"gcp-healthcare-nlp-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-healthcare-nlp-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-home","name":"gcp-home","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-home.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-identity-and-access-management","name":"gcp-identity-and-access-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-identity-and-access-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-identity-aware-proxy","name":"gcp-identity-aware-proxy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-identity-aware-proxy.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-identity-platform","name":"gcp-identity-platform","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-identity-platform.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-iot-core","name":"gcp-iot-core","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-iot-core.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-iot-edge","name":"gcp-iot-edge","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-iot-edge.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-key-access-justifications","name":"gcp-key-access-justifications","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-key-access-justifications.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-key-management-service","name":"gcp-key-management-service","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-key-management-service.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-kuberun","name":"gcp-kuberun","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-kuberun.svg","isIsometric":false,"coll
ection":"gcp"},{"id":"gcp-launcher","name":"gcp-launcher","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-launcher.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-local-ssd","name":"gcp-local-ssd","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-local-ssd.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-looker","name":"gcp-looker","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-looker.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-managed-service-for-microsoft-active-directory","name":"gcp-managed-service-for-microsoft-active-directory","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-managed-service-for-microsoft-active-directory.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-media-translation-api","name":"gcp-media-translation-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-media-translation-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-memorystore","name":"gcp-memorystore","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-memorystore.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-migrate-for-anthos","name":"gcp-migrate-for-anthos","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-migrate-for-anthos.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-migrate-for-compute-engine","name":"gcp-migrate-for-compute-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-migrate-for-compute-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-my-cloud","name":"gcp-my-cloud","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-my-cloud.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-network-connectivity-center","name":"gcp-network-connectivity-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-network-connectivity-center.svg","isIsometric":f
alse,"collection":"gcp"},{"id":"gcp-network-intelligence-center","name":"gcp-network-intelligence-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-network-intelligence-center.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-network-security","name":"gcp-network-security","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-network-security.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-network-tiers","name":"gcp-network-tiers","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-network-tiers.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-network-topology","name":"gcp-network-topology","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-network-topology.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-onboarding","name":"gcp-onboarding","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-onboarding.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-os-configuration-management","name":"gcp-os-configuration-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-os-configuration-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-os-inventory-management","name":"gcp-os-inventory-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-os-inventory-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-os-patch-management","name":"gcp-os-patch-management","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-os-patch-management.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-partner-interconnect","name":"gcp-partner-interconnect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-partner-interconnect.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-partner-portal","name":"gcp-partner-portal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-partner-po
rtal.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-performance-dashboard","name":"gcp-performance-dashboard","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-performance-dashboard.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-permissions","name":"gcp-permissions","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-permissions.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-persistent-disk","name":"gcp-persistent-disk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-persistent-disk.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-phishing-protection","name":"gcp-phishing-protection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-phishing-protection.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-policy-analyzer","name":"gcp-policy-analyzer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-policy-analyzer.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-premium-network-tier","name":"gcp-premium-network-tier","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-premium-network-tier.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-private-connectivity","name":"gcp-private-connectivity","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-private-connectivity.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-private-service-connect","name":"gcp-private-service-connect","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-private-service-connect.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-producer-portal","name":"gcp-producer-portal","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-producer-portal.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-profiler","name":"gcp-profiler","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-profiler.svg","isIsometric":false
,"collection":"gcp"},{"id":"gcp-project","name":"gcp-project","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-project.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-pubsub","name":"gcp-pubsub","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-pubsub.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-quantum-engine","name":"gcp-quantum-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-quantum-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-quotas","name":"gcp-quotas","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-quotas.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-real-world-insights","name":"gcp-real-world-insights","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-real-world-insights.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-recommendations-ai","name":"gcp-recommendations-ai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-recommendations-ai.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-release-notes","name":"gcp-release-notes","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-release-notes.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-retail-api","name":"gcp-retail-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-retail-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-risk-manager","name":"gcp-risk-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-risk-manager.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-runtime-config","name":"gcp-runtime-config","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-runtime-config.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-secret-manager","name":"gcp-secret-manager","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-secret-manager.svg","is
Isometric":false,"collection":"gcp"},{"id":"gcp-security-command-center","name":"gcp-security-command-center","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-security-command-center.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-security-health-advisor","name":"gcp-security-health-advisor","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-security-health-advisor.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-security-key-enforcement","name":"gcp-security-key-enforcement","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-security-key-enforcement.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-security","name":"gcp-security","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-security.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-service-discovery","name":"gcp-service-discovery","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-service-discovery.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-speech-to-text","name":"gcp-speech-to-text","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-speech-to-text.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-stackdriver","name":"gcp-stackdriver","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-stackdriver.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-standard-network-tier","name":"gcp-standard-network-tier","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-standard-network-tier.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-stream-suite","name":"gcp-stream-suite","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-stream-suite.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-support","name":"gcp-support","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-support.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-te
nsorflow-enterprise","name":"gcp-tensorflow-enterprise","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-tensorflow-enterprise.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-text-to-speech","name":"gcp-text-to-speech","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-text-to-speech.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-tools-for-powershell","name":"gcp-tools-for-powershell","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-tools-for-powershell.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-trace","name":"gcp-trace","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-trace.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-traffic-director","name":"gcp-traffic-director","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-traffic-director.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-transfer-appliance","name":"gcp-transfer-appliance","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-transfer-appliance.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-transfer","name":"gcp-transfer","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-transfer.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-user-preferences","name":"gcp-user-preferences","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-user-preferences.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-vertexai","name":"gcp-vertexai","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-vertexai.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-video-intelligence-api","name":"gcp-video-intelligence-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-video-intelligence-api.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-virtual-private-cloud","name":"gcp-virtual-private-cloud","url":"https://isoflow-public.
s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-virtual-private-cloud.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-visual-inspection","name":"gcp-visual-inspection","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-visual-inspection.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-vmware-engine","name":"gcp-vmware-engine","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-vmware-engine.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-web-risk","name":"gcp-web-risk","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-web-risk.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-web-security-scanner","name":"gcp-web-security-scanner","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-web-security-scanner.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-workflows","name":"gcp-workflows","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-workflows.svg","isIsometric":false,"collection":"gcp"},{"id":"gcp-workload-identity-pool","name":"gcp-workload-identity-pool","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/gcp/gcp-workload-identity-pool.svg","isIsometric":false,"collection":"gcp"},{"id":"_k8s_","name":"_k8s_","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/_k8s_.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-api","name":"k8s-api","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-api.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-c-c-m","name":"k8s-c-c-m","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-c-c-m.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-c-m1","name":"k8s-c-m1","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-c-m1.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-c-role","name":"k8s-c-role","url":"https://iso
flow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-c-role.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-cm","name":"k8s-cm","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-cm.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-control-plane","name":"k8s-control-plane","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-control-plane.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-crb","name":"k8s-crb","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-crb.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-crd","name":"k8s-crd","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-crd.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-cronjob","name":"k8s-cronjob","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-cronjob.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-deploy","name":"k8s-deploy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-deploy.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-ds","name":"k8s-ds","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-ds.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-ep","name":"k8s-ep","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-ep.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-etcd","name":"k8s-etcd","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-etcd.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-group","name":"k8s-group","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-group.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-hpa","name":"k8s-hpa","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-hpa.svg","isI
sometric":false,"collection":"kubernetes"},{"id":"k8s-ing","name":"k8s-ing","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-ing.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-job","name":"k8s-job","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-job.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-k-proxy","name":"k8s-k-proxy","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-k-proxy.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-kubelet","name":"k8s-kubelet","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-kubelet.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-limits","name":"k8s-limits","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-limits.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-netpol","name":"k8s-netpol","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-netpol.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-node","name":"k8s-node","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-node.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-ns","name":"k8s-ns","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-ns.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-pod","name":"k8s-pod","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-pod.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-psp","name":"k8s-psp","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-psp.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-pv","name":"k8s-pv","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-pv.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-pvc","name":"k8s-pvc","url":"https://
isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-pvc.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-quota","name":"k8s-quota","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-quota.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-rb","name":"k8s-rb","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-rb.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-role","name":"k8s-role","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-role.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-rs","name":"k8s-rs","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-rs.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-sa","name":"k8s-sa","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-sa.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-sc","name":"k8s-sc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-sc.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-sched","name":"k8s-sched","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-sched.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-secret","name":"k8s-secret","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-secret.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-sts","name":"k8s-sts","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-sts.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-svc","name":"k8s-svc","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-svc.svg","isIsometric":false,"collection":"kubernetes"},{"id":"k8s-user","name":"k8s-user","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-user.svg","isIsometric":false,"collection":"kubern
etes"},{"id":"k8s-vol","name":"k8s-vol","url":"https://isoflow-public.s3.eu-west-2.amazonaws.com/isopacks/kubernetes/k8s-vol.svg","isIsometric":false,"collection":"kubernetes"}],"components":[{"id":"caaf10ed-a5c9-476c-b5e6-f2fe679f5c81","name":"Database","icon":"storage","tags":[]},{"id":"51686dab-8338-443a-858f-384e4d2f2776","name":"Env Vars","icon":"vm","tags":[]},{"id":"796e917a-715b-4b54-87d5-845b3b1e4ca1","name":"JSON","icon":"document","tags":[],"description":"pgwatch is a flexible PostgreSQL-specific monitoring solution, offering a comprehensive view of database performance and health. It provides a user-friendly interface through Grafana dashboards, allowing users to easily inspect various metrics and trends.
In the world of database management, monitoring plays a crucial role in ensuring stability, performance, and security. With a constant need to keep databases healthy and responsive, pgwatch answers three fundamental questions:
"},{"location":"index.html#what","title":"What?","text":"What sources to monitor?
pgwatch is designed specifically for monitoring PostgreSQL databases and related infrastructure. It covers a wide range of components crucial for PostgreSQL ecosystems, including:
This extended monitoring capability allows you to gain a comprehensive view of not only your PostgreSQL databases but also the surrounding infrastructure that supports and enhances your database operations.
"},{"location":"index.html#how","title":"How?","text":"What metrics are available for monitoring?
pgwatch provides out-of-the-box support for almost all essential PostgreSQL metrics, including:
In addition to the standard metrics, pgwatch can be easily extended to monitor custom metrics based on your specific needs. The solution offers flexibility to fine-tune monitoring details and the aggressiveness of data collection.
"},{"location":"index.html#where","title":"Where?","text":"Where are the measurements stored and where can users inspect the dashboards?
For a detailed list of all features and capabilities, please refer to the Features page.
"},{"location":"concept/components.html","title":"Components","text":"The main development idea around pgwatch was to do the minimal work needed and not to reinvent the wheel - meaning that pgwatch is mostly just about gluing together some already proven pieces of software for metrics storage and using Grafana for dashboarding. So here is a listing of components that can be used to build up a monitoring setup around the pgwatch metrics collector. Note that most components are not mandatory and for tasks like metrics storage there are many components to choose from.
"},{"location":"concept/components.html#the-metrics-gathering-daemon","title":"The metrics gathering daemon","text":"The metrics collector, written in Go, is the only mandatory and most critical component of the whole solution. The main task of the pgwatch collector / daemon is pretty simple - reading the configuration and metric definitions, fetching the metrics from the configured databases using the configured connection info and finally storing the metric measurements to some other database, or just exposing them over a port for scraping in case of Prometheus mode.
"},{"location":"concept/components.html#configuration","title":"Configuration","text":"The configuration says which databases, how often and with which metrics (SQL queries) are to be gathered. There are 2 options to store the configuration:
Many options here so that one can for example go for maximum storage effectiveness or pick something where they already know the query language:
"},{"location":"concept/components.html#postgresql","title":"PostgreSQL","text":"PostgreSQL is the world's most advanced Open Source RDBMS.
Postgres storage is based on the JSONB datatype so minimally version 9.4+ is required, but for bigger setups where partitioning is a must, v11+ is needed. Any already existing Postgres database will do the trick, see the Bootstrapping the Metrics DB section for details.
"},{"location":"concept/components.html#timescaledb","title":"TimescaleDB","text":"TimescaleDB is a time-series extension for PostgreSQL.
Although technically a plain extension it's often mentioned as a separate database system as it brings custom data compression to the table, enabling huge disk savings over standard Postgres. Note that pgwatch does not use Timescale's built-in retention management but a custom version.
"},{"location":"concept/components.html#prometheus","title":"Prometheus","text":"Prometheus is a time series database and monitoring system.
Though Prometheus is not a traditional database system, it's a good choice for monitoring Cloud-like environments as the monitoring targets don't need to know too much about how actual monitoring will be carried out later and also Prometheus has a nice fault-tolerant alerting system for enterprise needs. By default, Prometheus is not set up for long term metrics storage!
"},{"location":"concept/components.html#json-files","title":"JSON files","text":"Plain text files for testing / special use cases.
"},{"location":"concept/components.html#the-web-ui","title":"The Web UI","text":"The second homegrown component of the pgwatch solution is an optional and relatively simple Web UI for administering details of the monitoring configuration like which databases should be monitored, with which metrics and intervals. Besides that there are some basic overview tables to analyze the gathered data and also possibilities to delete unneeded metric data (when removing a test host for example).
"},{"location":"concept/components.html#metrics-representation","title":"Metrics representation","text":"Standard pgwatch setup uses Grafana for analyzing the gathered metrics data in a visual, point-and-click way. For that a rich set of predefined dashboards for Postgres is provided, that should cover the needs of most users - advanced users would mostly always want to customize some aspects though, so it's not meant as a one-size-fits-all solution. Also as metrics are stored in a DB, they can be visualized or processed in any other way.
"},{"location":"concept/components.html#component-diagram","title":"Component diagram","text":"Component diagram of a typical setup:
"},{"location":"concept/components.html#component-reuse","title":"Component reuse","text":"All components are loosely coupled, thus for non-pgwatch components (pgwatch components are only the metrics collector) you can decide to make use of an already existing installation of Postgres, Grafana or Prometheus and run additionally just the pgwatch collector.
"},{"location":"concept/installation_options.html","title":"Installation options","text":"Besides the freedom of choosing from a set of metric measurements storage options one can also choose how the monitoring configuration (connect strings, metric sets and intervals) is going to be stored.
"},{"location":"concept/installation_options.html#configuration-database-based-operation","title":"Configuration database based operation","text":"This is the original central pull mode depicted on the architecture diagram. It requires a small schema to be rolled out on any Postgres database accessible to the metrics gathering daemon, which will hold the connect strings, metric definition SQLs and preset configurations and some other more minor attributes. For rollout details see the custom installation chapter.
The default Docker demo image cybertecpostgresql/pgwatch-demo
uses this approach.
One can deploy the gatherer daemon(s) decentralized with sources to be monitored defined in simple YAML files. In that case there is no need for the central Postgres configuration database. See the sample.sources.yaml config file for an example.
Note
In this mode you may also want, but are not forced, to point out the path to the metric definition YAML file when starting the gatherer. Also note that the configuration system supports multiple YAML files in a folder so that you could easily programmatically manage things via Ansible, for example, and you can also use environment variables inside YAML files.
"},{"location":"concept/long_term_installations.html","title":"Long term installations","text":"For long term pgwatch setups the main challenge is to keep the software up-to-date to guarantee stable operation and also to make sure that all DBs are under monitoring.
"},{"location":"concept/long_term_installations.html#keeping-inventory-in-sync","title":"Keeping inventory in sync","text":"Adding new DBs to monitoring and removing those shut down, can become a problem if teams are big, databases are many, and it's done by hand (common for on-premise, non-orchestrated deployments). To combat that, the most typical approach would be to write some script or Cronjob that parses the company's internal inventory database, files or endpoints and translate changes to according CRUD operations on the pgwatch.source table directly.
One could also use the REST API for that purpose.
If pgwatch configuration is kept in YAML files, it should be also relatively easy to automate the maintenance as the configuration can be organized so that one file represent a single monitoring entry, i.e. the --sources
and --metrics
parameters can also refer to a folder of YAML files.
The pgwatch metrics gathering daemon is the core component of the solution and thus the most critical one. So it's definitely recommended to update it at least once per year or minimally when some freshly released Postgres major version instances are added to monitoring. New Postgres versions don't necessarily mean that something will break, but you'll be missing some newly added metrics, plus the occasional optimizations. See the upgrading chapter for details, but basically the process is very similar to initial installation as the collector doesn't have any state on its own - it's just one executable file.
"},{"location":"concept/long_term_installations.html#metrics-maintenance","title":"Metrics maintenance","text":"Metric definition SQLs are regularly corrected as suggestions and improvements come in and also new ones are added to cover the latest Postgres versions, so it would make sense to refresh them 1-2x per year.
If using built-in metrics, just installing newer pre-built RPM / DEB packages will do the trick automatically but for configuration database based setups you'd need to follow a simple process described here.
"},{"location":"concept/long_term_installations.html#dashboard-maintenance","title":"Dashboard maintenance","text":"Same as with metrics, the built-in Grafana dashboards are also being actively updated, so it would make sense to refresh them occasionally as well. You could manually just re-import some dashboards of interest from JSON files in the [/etc/pgwatch/grafana-dashboards] folder or from GitHub.
Info
Notable new dashboards are usually also listed in release notes and most dashboards also have sample screenshots available.
"},{"location":"concept/long_term_installations.html#storage-monitoring","title":"Storage monitoring","text":"In addition to all that you should at least initially periodically monitor the metric measurements databases size as it can grow quite a lot (especially when using Postgres for storage) when the monitored databases have hundreds of tables and indexes, and if a lot of unique SQLs are used and pg_stat_statements
monitoring is enabled. If the storage grows too fast, one can increase the metric intervals (especially for \"table_stats\", \"index_stats\" and \"stat_statements\") or decrease the data retention periods via --retention
param.
For easy configuration management (adding databases to monitoring, adding metrics) there is a Web application bundled.
Besides managing the metrics gathering configurations, the two other useful features for the Web UI would be the possibility to look at the logs.
Default port: 8080
Sample screenshot of the Web UI:
"},{"location":"concept/web_ui.html#web-ui-security","title":"Web UI security","text":"By default, the Web UI is not secured - anyone can view and modify the monitoring configuration. If some security is needed though it can be enabled:
HTTPS
Password protection is controlled by --web-user
, --web-password
command-line parameters or PW_WEBUSER
, PW_WEBPASSWORD
environmental variables.
Note
It's better to use standard LibPQ .pgpass files so there's no requirement to store any passwords in pgwatch config database or YAML config file.
For security-sensitive environments make sure to always deploy password protection together with SSL, as it uses standard cookie-based techniques vulnerable to snooping / MITM attacks.
"},{"location":"developer/CODE_OF_CONDUCT.html","title":"Citizen Code of Conduct","text":""},{"location":"developer/CODE_OF_CONDUCT.html#1-purpose","title":"1. Purpose","text":"A primary goal of pgwatch is to be inclusive to the largest number of contributors, with the most varied and diverse backgrounds possible. As such, we are committed to providing a friendly, safe and welcoming environment for all, regardless of gender, sexual orientation, ability, ethnicity, socioeconomic status, and religion (or lack thereof).
This code of conduct outlines our expectations for all those who participate in our community, as well as the consequences for unacceptable behavior.
We invite all those who participate in pgwatch to help us create safe and positive experiences for everyone.
"},{"location":"developer/CODE_OF_CONDUCT.html#2-open-sourceculturetech-citizenship","title":"2. Open [Source/Culture/Tech] Citizenship","text":"A supplemental goal of this Code of Conduct is to increase open [source/culture/tech] citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community.
Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society.
If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, we want to know.
"},{"location":"developer/CODE_OF_CONDUCT.html#3-expected-behavior","title":"3. Expected Behavior","text":"The following behaviors are expected and requested of all community members:
The following behaviors are considered harassment and are unacceptable within our community:
No weapons will be allowed at pgwatch events, community spaces, or in other spaces covered by the scope of this Code of Conduct. Weapons include but are not limited to guns, explosives (including fireworks), and large knives such as those used for hunting or display, as well as any other item used for the purpose of causing injury or harm to others. Anyone seen in possession of one of these items will be asked to leave immediately, and will only be allowed to return without the weapon. Community members are further expected to comply with all state and local laws on this matter.
"},{"location":"developer/CODE_OF_CONDUCT.html#6-consequences-of-unacceptable-behavior","title":"6. Consequences of Unacceptable Behavior","text":"Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated.
Anyone asked to stop unacceptable behavior is expected to comply immediately.
If a community member engages in unacceptable behavior, the community organizers may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event).
"},{"location":"developer/CODE_OF_CONDUCT.html#7-reporting-guidelines","title":"7. Reporting Guidelines","text":"If you are subject to or witness unacceptable behavior, or have any other concerns, please notify a community organizer as soon as possible. Pavlo Golub.
Additionally, community organizers are available to help community members engage with local law enforcement or to otherwise help those experiencing unacceptable behavior feel safe. In the context of in-person events, organizers will also provide escorts as desired by the person experiencing distress.
"},{"location":"developer/CODE_OF_CONDUCT.html#8-addressing-grievances","title":"8. Addressing Grievances","text":"If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify cybertec-postgresql with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies.
"},{"location":"developer/CODE_OF_CONDUCT.html#9-scope","title":"9. Scope","text":"We expect all community participants (contributors, paid or otherwise; sponsors; and other guests) to abide by this Code of Conduct in all community venues--online and in-person--as well as in all one-on-one communications pertaining to community business.
This code of conduct and its related procedures also applies to unacceptable behavior occurring outside the scope of community activities when such behavior has the potential to adversely affect the safety and well-being of community members.
"},{"location":"developer/CODE_OF_CONDUCT.html#10-contact-info","title":"10. Contact info","text":"Pavlo Golub Cybertec
"},{"location":"developer/CODE_OF_CONDUCT.html#11-license-and-attribution","title":"11. License and attribution","text":"The Citizen Code of Conduct is distributed by Stumptown Syndicate under a Creative Commons Attribution-ShareAlike license.
Portions of text derived from the Django Code of Conduct and the Geek Feminism Anti-Harassment Policy.
Revision 2.3. Posted 6 March 2017.
Revision 2.2. Posted 4 February 2016.
Revision 2.1. Posted 23 June 2014.
Revision 2.0, adopted by the Stumptown Syndicate board on 10 January 2013. Posted 17 March 2013.
"},{"location":"developer/LICENSE.html","title":"License","text":"BSD 3-Clause License
Copyright (c) 2022, CYBERTEC PostgreSQL International GmbH All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"},{"location":"developer/contributing.html","title":"Contributing to PGWatch","text":"Thank you for considering contributing to PGWatch! Here are some guidelines to help you get started.
"},{"location":"developer/contributing.html#code-of-conduct","title":"Code of Conduct","text":"Please read and follow our Code of Conduct.
"},{"location":"developer/contributing.html#communication-channels","title":"Communication Channels","text":"The main communication channel for the project is the GitHub repository. Feel free to open issues and participate in discussions there.
"},{"location":"developer/contributing.html#setting-up-the-development-environment","title":"Setting Up the Development Environment","text":"To set up the development environment, please refer to the instructions in the README.md file. We use Docker Compose to simplify the setup process.
"},{"location":"developer/contributing.html#how-to-report-bugs","title":"How to Report Bugs","text":"If you encounter any bugs, please report them by opening an issue in the GitHub repository issues section. Provide as much detail as possible to help us understand and resolve the issue.
"},{"location":"developer/contributing.html#how-to-request-features","title":"How to Request Features","text":"If you have a feature request, please start a discussion in the GitHub repository discussions section. We value your feedback and ideas!
"},{"location":"developer/contributing.html#submitting-changes","title":"Submitting Changes","text":"Before submitting any changes, please discuss them in the GitHub repository discussions section. This helps ensure that your contribution aligns with the project goals and prevents duplicate efforts.
When you are ready to submit your changes, create a pull request. Make sure your pull request:
We follow the Go Style Guide. Please ensure your code adheres to these guidelines.
"},{"location":"developer/contributing.html#testing","title":"Testing","text":"We require tests for all changes. Please use the standard Go testing facilities. Ensure that all tests pass before submitting your pull request.
"},{"location":"developer/contributing.html#documentation","title":"Documentation","text":"Documentation for the project resides in the same repository. If you make changes that require documentation updates, please include those changes in your pull request.
"},{"location":"developer/contributing.html#contributor-license-agreement-cla","title":"Contributor License Agreement (CLA)","text":"We do not require contributors to sign a Contributor License Agreement (CLA). By submitting a pull request, you agree that your contributions are submitted under the same license as the project.
We appreciate your contributions and efforts to improve PGWatch. If you have any questions, feel free to reach out through the GitHub repository.
Thank you!
"},{"location":"developer/godoc.html","title":"Documentation","text":""},{"location":"gallery/dashboards.html","title":"Dashboards","text":"Dashboards are a collection of visualizations that are displayed in a single page. They are useful for monitoring and analyzing data.
Health Check
Global Health
Biggest Tables Treemap
Checkpointer, Background Writer, I/O statistics
Indexes Overview
Database Overview With Time Lag Comparison
Database Overview for Developers (Unprivileged)
Global Databases Overview
Change Events
PostgreSQL Versions Overview
Recommendations
Replication Lag
Server Log Events
Realtime Execution Plans
Stat Activity Realtime
Stat Statements SQL Search
Stat Statements Top Visual
Stat Statements Top
System Statistics
Tables Top
"},{"location":"gallery/webui.html","title":"Web User Interface","text":"The Web User Interface (WebUI) allows you to interact with pgwatch and control monitored sources, metrics and presets definitions, and view logs.
Sources
Metrics
Presets
Logs
"},{"location":"howto/dashboarding_alerting.html","title":"Grafana intro","text":"To display the gathered and stored metrics the pgwatch project has decided to rely heavily on the popular Grafana dashboarding solution. This means only though that it's installed in the default Docker images and there's a set of predefined dashboards available to cover most of the metrics gathered via the Preset Configs.
This does not mean though that Grafana is in any way tightly coupled with the project's other components - quite the opposite actually, one can use any other means / tools to use the metrics data gathered by the pgwatch daemon.
Currently, there are around 30 preset dashboards available for PostgreSQL data sources. Due to that nowadays, if metric gathering volumes are not a problem, we recommend using Postgres storage for most users.
Note though that most users will probably want to always adjust the built-in dashboards slightly (colors, roundings, etc.), so that they should be taken only as examples to quickly get started. Also note that in case of changes it's not recommended to change the built-in ones, but use the Save as features - this will allow later to easily update all the dashboards en masse per script, without losing any custom user changes.
Links:
Built-in dashboards for PostgreSQL (TimescaleDB) storage
Screenshots of pgwatch default dashboards
The online Demo site
"},{"location":"howto/dashboarding_alerting.html#alerting","title":"Alerting","text":"Alerting is very conveniently also supported by Grafana in a simple point-and-click style - see here for the official documentation. In general all more popular notification services are supported, and it's pretty much the easiest way to quickly start with PostgreSQL alerting on a smaller scale. For enterprise usage with hundreds of instances it might get too \"clicky\" though and there are also some limitations - currently you can set alerts only on Graph panels and there must be no variables used in the query so you cannot use most of the pre-created pgwatch graphs, but need to create your own.
Nevertheless, alerting via Grafana is a good option for lighter use cases and there's also a preset dashboard template named \"Alert Template\" from the pgwatch project to give you some ideas on what to alert on.
Note though that alerting is always a bit of a complex topic - it requires good understanding of PostgreSQL operational metrics and also business criticality background infos, so we don't want to be too opinionated here, and it's up to the users to implement.
"},{"location":"howto/metrics_db_bootstrap.html","title":"Choosing a Database","text":"pgwatch supports multiple databases for storing metrics measurements. The following databases are supported:
We will use PostgreSQL in this guide. But the steps are similar for other databases. It's up to you to choose the database that best fits your needs and set it up accordingly.
"},{"location":"howto/metrics_db_bootstrap.html#creating-the-database","title":"Creating the Database","text":"First, we need to create a database for storing the metrics measurements. We will use the psql
command-line tool to create the database. You can also use a GUI tool like pgAdmin to create the database.
Let's assume we want to create a database named measurements
on a completely fresh PostgreSQL installation. It is wise to use a special role for the metrics database, so we will create a role named pgwatch
and assign it to the measurements
database.
$ psql -U postgres -h 10.0.0.42 -p 5432 -d postgres\npsql (17.2)\n\npostgres=# CREATE ROLE pgwatch WITH LOGIN PASSWORD 'pgwatchadmin';\nCREATE ROLE\n\npostgres=# CREATE DATABASE measurements OWNER pgwatch;\nCREATE DATABASE\n
That's it! We have created a database named measurements
with the owner pgwatch
. Now we can proceed to the next step.
pgwatch will automatically create the necessary tables and indexes in the database when it starts. You don't need to create any tables or indexes manually.
You can now configure pgwatch to use the measurements
database as the sink for storing metrics measurements.
$ pgwatch --sources=/etc/sources.yaml --sink=postgresql://pgwatch@10.0.0.42/measurements\n[INFO] [sink:postgresql://pgwatch@10.0.0.42/measurements] Initialising measurements database...\n[INFO] [sink:postgresql://pgwatch@10.0.0.42/measurements] Measurements sink activated\n...\n
That's it! You have successfully bootstrapped the metrics measurements database for pgwatch. You can now start collecting metrics from your sources and storing them in the database.
If now you want to see the tables created by pgwatch in the measurements
database, you can connect to the database using the psql
command-line tool and list the tables.
$ psql -U pgwatch -h 10.0.0.42 -p 5432 -d measurements\npsql (17.2)\n\nmeasurements=> \\dn\n List of schemas\n Name | Owner\n---------------+---------\n admin | pgwatch\n subpartitions | pgwatch\n(3 rows)\n
You can see that pgwatch has created the admin
and subpartitions
schemas in the measurements
database. These schemas contain the tables and indexes used by pgwatch to store metrics measurements. You may examine these schemas to understand how pgwatch stores metrics measurements in the database.
Tip
You can also add --log-level=debug
command-line parameter to see every SQL query executed by pgwatch. This can be useful for debugging purposes. But remember that this will log a lot of information, so it is wise to use it with empty sources this time, meaning there are no databases to monitor yet.
Min 1GB of RAM is required for a Docker setup using Postgres to store metrics.
The gatherer alone needs typically less than 50 MB if the metric measurements are stored online. Memory consumption will increase a lot when the metrics store is offline though, as then metrics are cached in RAM in ring buffer style up to a limit of 10k data points (for all databases) and then memory consumption is dependent on how \"wide\" are the metrics gathered.
Storage requirements vary a lot and are hard to predict.
10GB of disk space should be enough though for monitoring a single DB with \"exhaustive\" preset for 1 month with Postgres storage. 2 weeks is also the default metrics retention policy for Postgres running in Docker (configurable). Depending on the amount of schema objects - tables, indexes, stored procedures and especially on number of unique SQLs, it could be also much more. If disk size reduction is wanted for PostgreSQL storage then best would be to use the TimescaleDB extension - it has built-in compression and disk footprint is x5 times less than vanilla Postgres, while retaining full SQL support.
A low-spec (1 vCPU, 2 GB RAM) cloud machine can easily monitor 100 DBs in \"exhaustive\" settings (i.e. almost all metrics are monitored in 1-2min intervals) without breaking a sweat (\\<20% load).
A single Postgres node should handle thousands of requests per second.
When high metrics write latency is problematic (e.g. using a DBaaS across the Atlantic) then increasing the default maximum batching delay of 250ms usually gives good results. Relevant params: --batching-delay-ms / PW_BATCHING_MAX_DELAY_MS
.
Note that when monitoring a very large number of databases, it's possible to \"shard\" / distribute them between many metric collection instances running on different hosts, via the group
attribute. This requires that some hosts have been assigned a non-default group identifier, which is just a text field exactly for this sharding purpose. Relevant params: --group / PW_GROUP
.
Although all cloud service providers offer some kind of built-in instrumentation and graphs, they're mostly rather conservative in this area, so as not to consume extra server resources and not to overflow and confuse beginners with too much information. So for advanced troubleshooting it might make sense to gather some additional metrics on your own, especially given that you can also easily add custom business metrics to pgwatch using plain SQL, for example to track the amount of incoming sales orders. Also with pgwatch / Grafana you have more freedom on the visual representation side and access to around 30 prebuilt dashboards and a lot of freedom creating custom alerting rules.
The common denominator for all managed cloud services is that they remove / disallow dangerous or potentially dangerous functionalities like file system access and untrusted PL-languages like Python - so you'll lose a small amount of metrics and \"helper functions\" compared to a standard on-site setup described in the previous chapter <preparing_databases>
. This also means that you will get some errors displayed on some preset dashboards like \"DB overview\" and thus will be better off using a dashboard called \"DB overview Unprivileged\" tailored specially for such a use case.
pgwatch has been tested to work with the following managed database services:
"},{"location":"howto/using_managed_services.html#google-cloud-sql-for-postgresql","title":"Google Cloud SQL for PostgreSQL","text":"pg_monitor
system role available.gce
.To get the most out of pgwatch on GCE you need some additional clicks in the GUI / Cloud Console \"Flags\" section to enable some common PostgreSQL monitoring parameters like track_io_timing
and track_functions
.
No Python / OS helpers possible. OS metrics can be integrated in Grafana though using the CloudWatch data source
pg_monitor
system role available.
pgwatch default preset names: rds
, aurora
Documentation:
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.AuroraPostgreSQL.html
Note that the AWS Aurora PostgreSQL-compatible engine is missing some additional metrics compared to normal RDS.
"},{"location":"howto/using_managed_services.html#azure-database-for-postgresql","title":"Azure Database for PostgreSQL","text":"pg_monitor
system role available.azure
Surprisingly on Azure some file access functions are whitelisted, thus one can for example use the wal_size
metrics.
Note
By default Azure has pg_stat_statements not fully activated, so you need to enable it manually or via the API. Documentation link here.
"},{"location":"howto/using_managed_services.html#aiven-for-postgresql","title":"Aiven for PostgreSQL","text":"The Aiven developer documentation contains information on how to monitor PostgreSQL instances running on the Aiven platform with pgwatch.
"},{"location":"intro/features.html","title":"List of main features","text":"The pgwatch project got started back in 2016 by Kaarel Moppel and released in 2017 initially for internal monitoring needs at Cybertec as all the Open Source PostgreSQL monitoring tools at the time had various limitations like being too slow and invasive to set up or providing a fixed set of visuals and metrics.
For more background on the project motivations and design goals see the original series of blogposts announcing the project and the following feature updates released approximately twice per year.
Cybertec also provides commercial 9-to-5 and 24/7 support for pgwatch.
For feature requests or troubleshooting assistance please open an issue on the project's GitHub page.
"},{"location":"reference/advanced_features.html","title":"Advanced features","text":"Over the years the core functionality of fetching metrics from a set of plain Postgres DB-s has been extended in many ways to cover some common problem areas like server log monitoring and supporting monitoring of some other popular tools often used together with Postgres, like the PgBouncer connection pooler for example.
"},{"location":"reference/advanced_features.html#patroni-support","title":"Patroni support","text":"Patroni is a popular Postgres specific HA-cluster manager that makes node management simpler than ever, meaning that everything is dynamic though - cluster members can come and go, making monitoring in the standard way a bit tricky. But luckily Patroni cluster members information is stored in a DCS (Distributed Consensus Store), like etcd, so it can be fetched from there periodically.
When 'patroni' is selected as a source type then the usual Postgres host/port fields should be left empty (\"dbname\" can still be filled if only a specific single database is to be monitored) and instead \"Host config\" JSON field should be filled with DCS address, type and scope (cluster name) information. A sample config (for Config DB based setups) looks like:
{\n \"dcs_type\": \"etcd\",\n \"dcs_endpoints\": [\"http://127.0.0.1:2379\"],\n \"scope\": \"batman\",\n \"namespace\": \"/service/\"\n }\n
For YAML based setups an example can be found from the instances.yaml file.
If Patroni is powered by etcd, then also username, password, ca_file, cert_file, key_file optional security parameters can be defined - other DCS systems are currently only supported without authentication.
Also, if you don't use the standby nodes actively for queries then it might make sense to decrease the volume of gathered metrics and to disable the monitoring of such nodes with the \"Master mode only?\" checkbox (when using the Web UI) or with only_if_master=true if using a YAML based setup.
"},{"location":"reference/advanced_features.html#log-parsing","title":"Log parsing","text":"As of v1.7.0 the metrics collector daemon, when running on a DB server (controlled best over a YAML config), has capabilities to parse the database server logs for errors. Out-of-the-box it will though only work when logs are written in CSVLOG format. For other formats user needs to specify a regex that parses out named groups of following fields: database_name, error_severity. See here for an example regex.
Note that only the event counts are stored, no error texts, usernames or other infos! Errors are grouped by severity for the monitored DB and for the whole instance. The metric name to enable log parsing is \"server_log_event_counts\". Also note that for auto-detection of log destination / setting to work, the monitoring user needs superuser / pg_monitor privileges - if this is not possible then log settings need to be specified manually under \"Host config\" as seen for example here.
Sample configuration if not using CSVLOG logging:
On Postgres side (on the monitored DB)
# Debian / Ubuntu default log_line_prefix actually\n log_line_prefix = '%m [%p] %q%u@%d '\n
YAML config (recommended when \"pushing\" metrics from DB nodes to a central metrics DB) ## logs_glob_path is only needed if the monitoring user cannot auto-detect it (i.e. not a superuser / pg_monitor role)\n # logs_glob_path:\n logs_match_regex: '^(?P<log_time>.*) \\[(?P<process_id>\\d+)\\] (?P<user_name>.*)@(?P<database_name>.*?) (?P<error_severity>.*?): '\n
For log parsing to work the metric server_log_event_counts needs to be enabled or a preset config including it used - like the \"full\" preset."},{"location":"reference/advanced_features.html#pgbouncer-support","title":"PgBouncer support","text":"pgwatch also supports collecting internal statistics from the PgBouncer connection pooler, via the built-in special \"pgbouncer\" database and the SHOW STATS
command. To enable it choose the according DB Type, provide connection info to the pooler port and make sure the pgbouncer_stats metric or \"pgbouncer\" preset config is selected for the host. Note that for the \"DB Name\" field you should insert not \"pgbouncer\" (although this special DB provides all the statistics) but the real name of the pool you wish to monitor or leave it empty to track all pools. In the latter case individual pools will be identified / separated via the \"database\" tag.
There's also a built-in Grafana dashboard for PgBouncer data, looking like that:
"},{"location":"reference/advanced_features.html#pgpool-ii-support","title":"Pgpool-II support","text":"Quite similar to PgBouncer, also Pgpool offers some statistics on pool performance and status, which might be of interest especially if using the load balancing features. To enable it choose the according DB Type, provide connection info to the pooler port and make sure the pgpool_stats metric / preset config is selected for the host.
The built-in Grafana dashboard for Pgpool data looks something like that:
"},{"location":"reference/advanced_features.html#prometheus-scraping","title":"Prometheus scraping","text":"pgwatch was originally designed with direct metrics storage in mind, but later also support for externally controlled Prometheus scraping was added.
To enable the scraping endpoint, add this commandline parameter: --sink=prometheus://<host>:<port>/<namespace>
. If you omit host (Ex: --sink=prometheus://:8080
), server listens on all interfaces and supplied port. If you omit namespace, default is pgwatch
.
Additionally, note that you still need to specify some metrics config as usual - only metrics with interval values bigger than zero will be populated on scraping.
Currently, a few built-in metrics that require some state to be stored between scrapes, e.g. the \"change_events\" metric, will be ignored. Also, non-numeric data columns will be ignored! Tag columns will be preserved though as Prometheus \"labels\".
"},{"location":"reference/advanced_features.html#cloud-providers-support","title":"Cloud providers support","text":"Due to popularity of various managed PostgreSQL offerings there's also support for some managed options in sense of Preset Configs, that take into account the fact that on such platforms you get a limited user that doesn't have access to all metrics or some features have just been plain removed. Thus, to reduce server log errors and save time on experimenting there are following presets available:
Some variables influence multiple components. Command line parameters override env. variables (when doing custom deployments).
"},{"location":"reference/env_variables.html#docker-image-specific","title":"Docker image specific","text":"See pgwatch --help
output for details.
Basic Helm chart templates for installing pgwatch to a Kubernetes cluster are available as a standalone repository.
Notice
Charts are not considered as a part of pgwatch and are not maintained by pgwatch developers.
The corresponding setup can be found in the repository, and installation is done via the following commands:
cd openshift_k8s\nhelm install -f chart-values.yml pgwatch ./helm-chart\n
Please have a look at helm-chart/values.yaml
to get additional information of configurable options.
Metrics are named SQL queries that return a timestamp and pretty much anything else you find useful. Most metrics have many different query text versions for different target PostgreSQL versions, also optionally taking into account primary / replica state and as of v1.8 also versions of installed extensions.
-- a sample metric\nSELECT\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int;\n
Correct version of the metric definition will be chosen automatically by regularly connecting to the target database and checking the Postgres version, recovery state, and if the monitoring user is a superuser or not.
"},{"location":"reference/metric_definitions.html#built-in-metrics-and-presets","title":"Built-in metrics and presets","text":"There's a good set of pre-defined metrics & metric configs provided by the pgwatch project to cover all typical needs, but when monitoring hundreds of hosts you'd typically want to develop some custom Preset Configs or at least adjust the metric fetching intervals according to your monitoring goals.
Some things to note about the built-in metrics:
The \"change_events\" built-in metric, tracking DDL & config changes, uses internally some other \"*_hashes\" metrics which are not meant to be used on their own. Such metrics are described also accordingly on the Web UI /metrics page, and they should not be removed.
"},{"location":"reference/metric_definitions.html#recommendations","title":"recommendations","text":"When enabled (i.e. interval > 0
), this metric will find all other metrics starting with reco_*
and execute those queries. The purpose of the metric is to spot some performance, security and other \"best practices\" violations. Users can add new reco_*
queries freely.
This enables Postgres server log \"tailing\" for errors. Can't be used for \"pull\" setups though unless the DB logs are somehow mounted / copied over, as real file access is needed. See the Log parsing chapter for details.
"},{"location":"reference/metric_definitions.html#instance_up","title":"instance_up","text":"For normal metrics there will be no data rows stored if the DB is not reachable, but for this one there will be a 0 stored for the \"is_up\" column that under normal operations would always be 1. This metric can be used to calculate some \"uptime\" SLA indicator for example.
"},{"location":"reference/metric_definitions.html#archiver","title":"archiver","text":"This metric retrieves key statistics from the PostgreSQL pg_stat_archiver
view, providing insights into the status of WAL file archiving. It returns the total number of successfully archived files and failed archiving attempts. Additionally, it identifies if the most recent attempt resulted in a failure and calculates how many seconds have passed since the last failure. The metric only considers data if WAL archiving is enabled in the system, helping administrators monitor and diagnose issues related to the archiving process.
This metric gathers detailed information from the PostgreSQL pg_stat_activity
view, providing an overview of the current session and activity state for the database. It tracks the total number of client backends, active sessions, idle sessions, sessions waiting on locks, and background workers. The metric also calculates statistics on blocked sessions, longest waiting times, average and longest session durations, transaction times, and query durations. Additionally, it monitors autovacuum worker activity and provides the age of the oldest transaction (measured by xmin
). This metric helps administrators monitor session states, detect bottlenecks, and ensure the system is within its connection limits, providing visibility into database performance and contention.
This metric retrieves statistics from the pg_stat_bgwriter
view, providing information about the background writer process in PostgreSQL. It reports the number of buffers that have been cleaned (written to disk) by the background writer, how many times buffers were written because the background writer reached the maximum limit (maxwritten_clean
), and the total number of buffers allocated. Additionally, it calculates the time in seconds since the last reset of these statistics. This metric helps monitor the efficiency and behavior of PostgreSQL's background writer, which plays a crucial role in managing I/O by writing modified buffers to disk, thus helping to ensure smooth database performance.
This metric provides information about lock contention in PostgreSQL by identifying sessions that are waiting for locks and the sessions holding those locks. It captures details from the pg_locks
view and the pg_stat_activity
view to highlight the interactions between the waiting and blocking sessions. The result helps identify which queries are causing delays due to lock contention, the type of locks involved, and the users or sessions responsible for holding or waiting on locks. This metric is useful for diagnosing performance bottlenecks related to database locking.
This metric provides insights into the activity and performance of PostgreSQL's checkpointer process, which ensures that modified data pages are regularly written to disk to maintain consistency. It tracks the number of checkpoints that have been triggered either by the system's timing or by specific requests, as well as how many restart points have been completed in standby environments. Additionally, it measures the time spent writing and synchronizing buffers to disk, the total number of buffers written, and how long it has been since the last reset of these statistics. This metric helps administrators understand how efficiently the system is handling checkpoints and whether there might be I/O performance issues related to the frequency or duration of checkpoint operations.
"},{"location":"reference/metric_definitions.html#db_stats","title":"db_stats","text":"This metric provides a comprehensive overview of various performance and health statistics for the current PostgreSQL database. It tracks key metrics such as the number of active database connections (numbackends
), transaction statistics (committed, rolled back), block I/O (blocks read and hit in the cache), and tuple operations (rows returned, fetched, inserted, updated, deleted). Additionally, it monitors conflicts, temporary file usage, deadlocks, and block read/write times.
The metric also includes system uptime by calculating how long the PostgreSQL postmaster
process has been running and tracks checksum failures and the time since the last checksum failure. It identifies if the database is in recovery mode, retrieves the system identifier, and tracks session-related statistics such as total session time, active time, idle-in-transaction time, and sessions that were abandoned, fatal, or killed.
Lastly, it monitors the number of invalid indexes that are not currently being rebuilt. This metric helps database administrators gain insights into overall database performance, transaction behavior, session activity, and potential index-related issues, which are critical for efficient database management and troubleshooting.
"},{"location":"reference/metric_definitions.html#wal","title":"wal","text":"This metric tracks key information about the PostgreSQL system's write-ahead logging (WAL) and recovery state. It calculates the current WAL location, showing how far the system has progressed in terms of WAL writing or replaying if in recovery mode. The metric also indicates whether the database is in recovery, monitors the system's uptime since the postmaster
process started, and provides the system's unique identifier. Additionally, it retrieves the current timeline, which is essential for tracking the state of the WAL log and recovery process. This metric helps administrators monitor database health, especially in terms of recovery and WAL operations.
This metric identifies lock contention in the PostgreSQL database by tracking sessions that are waiting for locks and the corresponding sessions holding those locks. It examines active queries in the current database and captures detailed information about both the waiting and blocking sessions. For each waiting session, it records the lock type, user, lock mode, and the query being executed, as well as the table involved. Similarly, for the session holding the lock, it captures the same details. This helps database administrators identify queries that are causing delays due to lock contention, enabling them to troubleshoot performance issues and optimize query execution.
"},{"location":"reference/metric_definitions.html#kpi","title":"kpi","text":"This metric provides a detailed overview of PostgreSQL database performance and activity. It tracks the current WAL (Write-Ahead Log) location, the number of active and blocked backends, and the oldest transaction time. It calculates the total transaction rate (TPS) by summing committed and rolled-back transactions, as well as specific statistics on table and index performance, such as the number of sequential scans on tables larger than 10MB and the number of function calls.
Additionally, the metric tracks block read and write times, the amount of temporary bytes used, deadlocks, and whether the database is in recovery mode. Finally, it calculates the uptime of the PostgreSQL postmaster
process. This information helps administrators monitor and manage system performance, detect potential bottlenecks, and optimize query and transaction behavior.
This metric provides detailed statistics about the performance and resource usage of SQL queries executed on the PostgreSQL database. It collects data from the pg_stat_statements
view, focusing on queries that have been executed more than five times and have significant execution time (greater than 5 milliseconds). It aggregates important performance metrics for each query, such as:
calls
), total execution time, and total planning time.The metric ranks queries based on different performance factors, including execution time, number of calls, block reads/writes, and temporary block usage, and it limits the results to the top 100 queries in each category. This helps administrators identify resource-intensive queries, optimize database performance, and improve query efficiency by focusing on those that consume the most I/O or take the longest to execute.
"},{"location":"reference/metric_definitions.html#table_stats","title":"table_stats","text":"This metric collects and summarizes detailed information about table sizes, table activity, and maintenance operations in PostgreSQL. It tracks both individual tables and partitioned tables, including their root partitions. The metric calculates the size of each table (in bytes), as well as other key statistics like sequential scans, index scans, tuples inserted, updated, or deleted, and the number of live and dead tuples. It also tracks maintenance operations like vacuum and analyze runs, as well as whether autovacuum is disabled for specific tables.
For partitioned tables, the metric aggregates the statistics across all partitions and provides a summary of the partitioned table as a whole, marking it as the root partition. Additionally, it calculates the time since the last vacuum and analyze operations and captures transaction freeze age for each table, which helps monitor when a table might need a vacuum to prevent transaction wraparound.
By focusing on tables larger than 10MB and ignoring temporary and system tables, this metric helps database administrators monitor the largest and most active tables in their database, ensuring that maintenance operations like vacuum and analyze are running effectively and identifying tables that may be contributing to performance bottlenecks due to size or activity.
"},{"location":"reference/metric_definitions.html#custom-metrics","title":"Custom metrics","text":"For defining metrics definitions you should adhere to a couple of basic concepts:
Every metric query should have an epoch_ns
(nanoseconds since epoch column) to record the metrics reading time. If the column is not there, things will still work but the server timestamp of the metrics gathering daemon will be used, so a small loss (assuming intra-datacenter monitoring with little lag) of precision occurs.
Queries should only return text, integer, boolean or floating point (a.k.a. double precision) Postgres data types. Note that columns with NULL values are not stored at all in the data layer as it's a bit bothersome to work with NULLs!
Column names should be descriptive enough so that they're self-explanatory, but not too long as it costs storage
Metric queries should execute fast - at least below the selected Statement timeout (default 5s)
Columns can be optionally \"tagged\" by prefixing them with tag_
. By doing this, the column data will be indexed by Postgres, giving the following advantages:
All fetched metric rows can also be \"prettified\" with any custom static key-value data, per host. To enable use the \"Custom tags\" Web UI field for the monitored DB entry or \"custom_tags\" YAML field. Note that this works per host and applies to all metrics.
For Prometheus the numerical columns are by default mapped to a Value Type of \"Counter\" (as most Statistics Collector columns are cumulative), but when this is not the case and the column is a \"Gauge\" then according column attributes should be declared. See below section on column attributes for details.
For Prometheus all text fields will be turned into tags / labels as only floats can be stored!
/etc/pgwatch/metrics
. The folder name will be the metrics name, so choose wisely.Create a new subfolder for each \"minimally supported\" Postgres version and insert the metrics SQL definition into a file named \"metric.sql\".
Notice
Note the \"minimally supported\" part - i.e. if your query will work from version v11.0 to v17 then you only need one entry called \"11\". If there was a breaking change in the internal catalogs at v13 so that the query stopped working, you need a new entry named \"13\" that will be used for all versions above v13.
Activate the newly added metric by including it in some existing Preset Config or add it directly to the YAML config \"custom_metrics\" section.
The behaviour of plain metrics can be extended with a set of attributes that will modify the gathering in some way. The attributes are stored in YAML files called metric_attrs.yaml in a metrics root directory or in the metric_attribute
Config DB table.
Currently supported attributes are:
is_instance_level
Enables caching, i.e. sharing of metric data between various databases of a single instance to reduce load on the monitored server.
wal:\n sqls:\n 11: |\n select /* pgwatch_generated */\n ...\n gauges:\n - '*'\n is_instance_level: true\n
statement_timeout_seconds
Enables to override the default 'per monitored DB' statement timeouts on metric level.
metric_storage_name
Enables dynamic \"renaming\" of metrics at storage level, i.e. declaring almost similar metrics with different names but the data will be stored under one metric. Currently used (for out-of-the box metrics) only for the stat_statements_no_query_text
metric, to not store actual query texts from the \"pg_stat_statements\" extension for more security sensitive instances.
extension_version_based_overrides
Enables to \"switch out\" the query text from some other metric based on some specific extension version. See 'reco_add_index' for an example definition.
disabled_days
Enables to \"pause\" metric gathering on specified days. See metric_attrs.yaml
for \"wal\" for an example.
disabled_times
Enables to \"pause\" metric gathering on specified time intervals. e.g. \"09:00-17:00\" for business hours. Note that if time zone is not specified the server time of the gather daemon is used. disabled_days / disabled_times can also be defined both on metric and host (host_attrs) level.
Besides the _tag column prefix modifier, it's also possible to modify the output of certain columns via a few attributes. It's only relevant for Prometheus output though currently, to set the correct data types in the output description, which is generally considered a nice-to-have thing anyway. For YAML based setups this means adding a \"column_attrs.yaml\" file in the metrics top folder and for Config DB based setups an according \"column_attrs\" JSON column should be filled via the Web UI.
Supported column attributes:
gauges
Describe the mentioned output columns as of TYPE gauge, i.e. the value can change any time in any direction. Default TYPE for pgwatch is counter.
table_stats_approx:\n sqls:\n 11: |\n ...\n gauges:\n - table_size_b\n - total_relation_size_b\n - toast_size_b\n - seconds_since_last_vacuum\n - seconds_since_last_analyze\n - n_live_tup\n - n_dead_tup\n metric_storage_name: table_stats\n
As mentioned in Helper Functions section, Postgres knows very little about the Operating System that it's running on, so in some (most) cases it might be advantageous to also monitor some basic OS statistics together with the PostgreSQL ones, to get a better head start when troubleshooting performance problems. But as setup of such OS tools and linking the gathered data is not always trivial, pgwatch has a system of helpers for fetching such data.
One can invent and install such helpers on the monitored databases freely to expose any information needed (backup status etc.) via Python, or any other PL-language supported by Postgres, and then add these metrics similarly to any other Postgres-native metrics.
"},{"location":"reference/security.html","title":"Security aspects","text":""},{"location":"reference/security.html#general-security-information","title":"General security information","text":"Security can be tightened for most pgwatch components quite granularly, but the default values for the Docker image don't focus on security though but rather on being quickly usable for ad-hoc performance troubleshooting, which is where the roots of pgwatch lie.
Some points on security:
The administrative Web UI doesn't have by default any security. Configurable via env. variables.
Viewing Grafana dashboards by default doesn't require login. Editing needs a password. Configurable via env. variables.
Dashboards based on the \"stat_statements\" metric (Stat Statement Overview / Top) expose actual queries.
They should be \"mostly\" stripped of details though and replaced by placeholders by Postgres, but if no risks can be taken such dashboards (or at least according panels) should be deleted. Or as an alternative the stat_statements_no_query_text
and pg_stat_statements_calls
metrics could be used, which don't store query texts in the first place.
Safe certificate connections to Postgres are supported. According sslmode (verify-ca, verify-full) and cert file paths need to be specified then in connection string on Web UI \"/dbs\" page or in the YAML config.
Note that although pgwatch can handle password security, in many cases it's better to still use the standard LibPQ .pgpass file to store passwords.
Some common sense security is built into default Docker images for all components but not activated by default. A sample command to launch pgwatch with the following security \"checkpoints\" enabled:
Password encryption for connect strings stored in the Config DB
docker run --name pw3 -d --restart=unless-stopped \\\n -p 3000:3000 -p 8080:8080 \\\n -e PW_GRAFANASSL=1 -e PW_WEBSSL=1 \\\n -e PW_GRAFANANOANONYMOUS=1 -e PW_GRAFANAUSER=myuser \\\n -e PW_GRAFANAPASSWORD=mypass \\\n -e PW_WEBNOANONYMOUS=1 -e PW_WEBNOCOMPONENTLOGS=1 \\\n -e PW_WEBUSER=myuser -e PW_WEBPASSWORD=mypass \\\n -e PW_AES_GCM_KEYPHRASE=qwerty \\\n cybertec/pgwatch\n
For custom installs it's up to the user though. A hint - Docker launcher files can also be inspected to see which config parameters are being touched.
"},{"location":"reference/technical_details.html","title":"Technical details","text":"Here are some technical details that might be interesting for those who are planning to use pgwatch for critical monitoring tasks or customize it in some way.
Dynamic management of monitored databases, metrics and their intervals - no need to restart/redeploy
Config DB or YAML / SQL files are scanned every 2 minutes (by default, changeable via --servers-refresh-loop-seconds
) and changes are applied dynamically. As common connectivity errors also handled, there should be no need to restart the gatherer \"for fun\". Please always report issues which require restarting.
There are some safety features built-in so that monitoring would not obstruct actual operation of databases
-e PW_WEBSSL=1 -e PW_GRAFANASSL=1
when launching Docker)Instance-level metrics caching
To further reduce load on multi-DB instances, pgwatch can cache the output of metrics that are marked to gather only instance-level data. One such metric is for example \"wal\", and the metric attribute is \"is_instance_level\". Caching will be activated only for continuous source types, and to a default limit of up to 30 seconds (changeable via the --instance-level-cache-max-seconds
param).
As described in the Components chapter, there is a couple of ways how to set up pgwatch. Two most common ways though are the central Config DB based \"pull\" approach and the YAML file based \"push\" approach, plus Grafana to visualize the gathered metrics.
"},{"location":"tutorial/custom_installation.html#config-db-based-setup","title":"Config DB based setup","text":""},{"location":"tutorial/custom_installation.html#overview-of-installation-steps","title":"Overview of installation steps","text":"Below are the sample steps for a custom installation from scratch using Postgres for the pgwatch configuration DB, metrics DB and Grafana config DB.
All examples here assume Ubuntu as OS - but it's basically the same for the RedHat family of operating systems also, minus package installation syntax differences.
Install Postgres
Follow the standard Postgres install procedure basically. Use the latest major version available, but minimally v11+ is recommended for the metrics DB due to recent partitioning speedup improvements and also older versions were missing some default JSONB casts so that a few built-in Grafana dashboards need adjusting otherwise.
To get the latest Postgres versions, official Postgres PGDG repos are to be preferred over default distro repos. Follow the instructions from:
Install pgwatch - either from pre-built packages or by compiling the Go code
Using pre-built packages
The pre-built DEB / RPM / Tar packages are available on the GitHub releases page.
# find out the latest package link and replace below, using v1.8.0 here\nwget https://github.com/cybertec-postgresql/pgwatch/releases/download/v1.8.0/pgwatch_v1.8.0-SNAPSHOT-064fdaf_linux_64-bit.deb\nsudo dpkg -i pgwatch_v1.8.0-SNAPSHOT-064fdaf_linux_64-bit.deb\n
Compiling the Go code yourself
This method of course is not needed unless dealing with maximum security environments or some slight code changes are required.
Install Go by following the official instructions
Get the pgwatch project's code and compile the gatherer daemon
git clone https://github.com/cybertec-postgresql/pgwatch.git\ncd pgwatch/internal/webui\nyarn install --network-timeout 100000 && yarn build\ncd ..\ngo build\n
After fetching all the Go library dependencies (can take minutes) an executable named \"pgwatch\" should be generated. Additionally, it's a good idea to copy it to /usr/bin/pgwatch
.
Configure a SystemD auto-start service (optional)
Sample startup scripts can be found at /etc/pgwatch/startup-scripts/pgwatch.service or online here. Note that they are OS-agnostic and might need some light adjustment of paths, etc. - so always test them out.
Bootstrap the config DB
Create a user to \"own\" the pgwatch
schema
Typically called pgwatch
but can be anything really, if the schema creation file is adjusted accordingly.
psql -c \"create user pgwatch password 'xyz'\"\npsql -c \"create database pgwatch owner pgwatch\"\n
Roll out the pgwatch config schema
The schema will most importantly hold connection strings of DBs to be monitored and the metric definitions.
# FYI - one could get the below schema files also directly from GitHub\n# if re-using some existing remote Postgres instance where pgwatch was not installed\npsql -f /etc/pgwatch/sql/config_store/config_store.sql pgwatch\npsql -f /etc/pgwatch/sql/config_store/metric_definitions.sql pgwatch\n
Bootstrap the measurements storage DB
Create a dedicated database for storing metrics and a user to \"own\" the metrics schema
Here again default scripts expect a role named pgwatch
but can be anything if the scripts are adjusted accordingly.
psql -c \"create database pgwatch_metrics owner pgwatch\"\n
Roll out the pgwatch metrics storage schema
This is a place to pause and first think how many databases will be monitored, i.e. how much data generated, and based on that one should choose a suitable metrics storage schema. There are a couple of different options available that are described here in detail, but the gist of it is that you don't want partitioning schemes too complex if you don't have zounds of data and don't need the fastest queries. For a smaller amount of monitored DBs (a couple dozen to a hundred) the default \"metric-time\" is a good choice. For hundreds of databases, aggressive intervals, or long term storage usage of the TimescaleDB extension is recommended.
cd /etc/pgwatch/sql/metric_store\npsql -f roll_out_metric_time.psql pgwatch_metrics\n
Note
Default retention for Postgres storage is 2 weeks! To change, use the --pg-retention-days / PW_PG_RETENTION_DAYS
gatherer parameter.
Prepare the \"to-be-monitored\" databases for metrics collection
As a minimum we need a plain unprivileged login user. Better though is to grant the user also the pg_monitor
system role, available on v10+. Superuser privileges should be normally avoided for obvious reasons of course, but for initial testing in safe environments it can make the initial preparation (automatic helper rollouts) a bit easier still, given superuser privileges are later stripped.
To get most out of your metrics some SECURITY DEFINER
wrappers functions called \"helpers\" are recommended on the DB-s under monitoring. See the detailed chapter on the \"preparation\" topic here for more details.
Configure DB-s and metrics / intervals to be monitored
pgwatch.monitored_db
tableStart the pgwatch metrics collection agent
The gatherer has quite some parameters (use the --help
flag to show them all), but simplest form would be:
pgwatch-daemon \\\n --host=localhost --user=pgwatch --dbname=pgwatch \\\n --datastore=postgres --pg-metric-store-conn-str=postgresql://pgwatch@localhost:5432/pgwatch_metrics \\\n --verbose=info\n
Default connections params expect a trusted localhost Config DB setup so mostly the 2nd line is not needed, actually.
Or via SystemD if set up in previous steps
useradd -m -s /bin/bash pgwatch # default SystemD templates run under the pgwatch user\nsudo systemctl start pgwatch\nsudo systemctl status pgwatch\n
After initial verification that all works it's usually a good idea to set verbosity back to default by removing the verbose flag.
Another tip to configure connection strings inside SystemD service files is to use the \"systemd-escape\" utility to escape special characters like spaces etc. if using the LibPQ connect string syntax rather than JDBC syntax.
Monitor the console or log output for any problems
If you see metrics trickling into the \"pgwatch_metrics\" database (metric names are mapped to table names and tables are auto-created), then congratulations - the deployment is working! When using some more aggressive preset metrics config then there are usually still some errors though, due to the fact that some more extensions or privileges are missing on the monitored database side. See the according chapter here.
Info
When you're compiling your own gatherer then the executable file will be named just pgwatch
instead of pgwatch-daemon
to avoid mixups.
Install Grafana
Create a Postgres database to hold Grafana internal config, like dashboards etc.
Theoretically it's not absolutely required to use Postgres for storing Grafana internal settings / dashboards, but doing so has 2 advantages - you can easily roll out all pgwatch built-in dashboards and one can also do remote backups of the Grafana configuration easily.
psql -c \"create user pgwatch_grafana password 'xyz'\"\npsql -c \"create database pgwatch_grafana owner pgwatch_grafana\"\n
Follow the instructions from https://grafana.com/docs/grafana/latest/installation/debian/, basically something like:
wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -\necho \"deb https://packages.grafana.com/oss/deb stable main\" | sudo tee -a /etc/apt/sources.list.d/grafana.list\nsudo apt-get update && sudo apt-get install grafana\n\n# review / change config settings and security, etc\nsudo vi /etc/grafana/grafana.ini\n\n# start and enable auto-start on boot\nsudo systemctl daemon-reload\nsudo systemctl start grafana-server\nsudo systemctl status grafana-server\n
Default Grafana port: 3000
Configure Grafana config to use our pgwatch_grafana
DB
Place something like below in the [database]
section of /etc/grafana/grafana.ini
[database]\ntype = postgres\nhost = my-postgres-db:5432\nname = pgwatch_grafana\nuser = pgwatch_grafana\npassword = xyz\n
Taking a look at [server], [security]
and [auth*]
sections is also recommended.
Set up the pgwatch
metrics database as the default datasource
We need to tell Grafana where our metrics data is located. Add a datasource via the Grafana UI (Admin -> Data sources) or adjust and execute the \"pgwatch/bootstrap/grafana_datasource.sql\" script on the pgwatch_grafana
DB.
Add pgwatch predefined dashboards to Grafana
This could be done by importing the pgwatch dashboard definition JSONs manually, one by one, from the \"grafana\" folder (\"Import Dashboard\" from the Grafana top menu) or via a small helper script located at /etc/pgwatch/grafana-dashboards/import_all.sh. The script needs some adjustment for metrics storage type, connect data and file paths.
Optionally install also Grafana plugins
Currently, one pre-configured dashboard (Biggest relations treemap) uses an extra plugin - if planning to use that dash, then run the following:
grafana-cli plugins install savantly-heatmap-panel\n
Start discovering the preset dashboards
If the previous step of launching pgwatch daemon succeeded, and it was more than some minutes ago, one should already see some graphs on dashboards like \"DB overview\" or \"DB overview Unprivileged / Developer mode\" for example.
From v1.4 one can also deploy the pgwatch gatherer daemons more easily in a de-centralized way, by specifying monitoring configuration via YAML files. In that case there is no need for a central Postgres \"config DB\".
YAML installation steps
Relevant gatherer parameters / env. vars: --config / PW_CONFIG
and --metrics-folder / PW_METRICS_FOLDER
.
For details on individual steps like installing pgwatch see the above paragraph.
"},{"location":"tutorial/docker_installation.html","title":"Installing using Docker","text":""},{"location":"tutorial/docker_installation.html#simple-setup-steps","title":"Simple setup steps","text":"The simplest real-life pgwatch setup should look something like that:
Decide which metrics storage engine you want to use - cybertecpostgresql/pgwatch-demo uses PostgreSQL. When only Prometheus sink is used (exposing a port for remote scraping), one should use the slimmer cybertecpostgresql/pgwatch image which doesn't have any built-in databases.
Find the latest pgwatch release version by going to the project's GitHub Releases page or use the public API with something like that:
curl -so- https://api.github.com/repos/cybertec-postgresql/pgwatch/releases/latest | jq .tag_name | grep -oE '[0-9\\.]+'\n
docker pull cybertecpostgresql/pgwatch-demo:X.Y.Z\n
docker run -d --restart=unless-stopped -p 3000:3000 -p 8080:8080 \\\n--name pw3 cybertecpostgresql/pgwatch-demo:X.Y.Z\n
Note that we're setting the container to be automatically restarted\nin case of a reboot/crash - which is highly recommended if not using\nsome container management framework to run pgwatch.\n
"},{"location":"tutorial/docker_installation.html#more-future-proof-setup-steps","title":"More future-proof setup steps","text":"Although the above simple setup example will do for more temporary setups / troubleshooting sessions, for permanent setups it's highly recommended to create separate volumes for all software components in the container, so that it would be easier to update to newer pgwatch Docker images and pull file system based backups, and also it might be a good idea to expose all internal ports at least on localhost for possible troubleshooting and making possible to use native backup tools more conveniently for Postgres.
Note that, for maximum flexibility, security and update simplicity it's best to do a custom setup though - see the next chapter for that.
So in short, for plain Docker setups would be best to do something like:
# let's create volumes for Postgres, Grafana and pgwatch marker files / SSL certificates\nfor v in pg grafana pw3 ; do docker volume create $v ; done\n\n# launch pgwatch with fully exposed Grafana and Health-check ports\n# and local Postgres and subnet level Web UI ports\ndocker run -d --restart=unless-stopped --name pw3 \\\n -p 3000:3000 -p 8081:8081 -p 127.0.0.1:5432:5432 -p 192.168.1.XYZ:8080:8080 \\\n -v pg:/var/lib/postgresql -v grafana:/var/lib/grafana -v pw3:/pgwatch/persistent-config \\\n cybertecpostgresql/pgwatch-demo:X.Y.Z\n
Note that in non-trusted environments it's a good idea to specify more sensitive ports together with some explicit network interfaces for additional security - by default Docker listens on all network devices!
Also note that one can configure many aspects of the software components running inside the container via ENV - for a complete list of all supported Docker environment variables see the ENV_VARIABLES.md file.
"},{"location":"tutorial/docker_installation.html#available-docker-images","title":"Available Docker images","text":"Following images are regularly pushed to Docker Hub:
cybertecpostgresql/pgwatch-demo
The original pgwatch \u201cbatteries-included\u201d image with PostgreSQL measurements storage. Just insert connect infos to your database via the admin Web UI (or directly into the Config DB) and then turn to the pre-defined Grafana dashboards to analyze DB health and performance.
cybertecpostgresql/pgwatch
A light-weight image containing only the metrics collection daemon / agent, that can be integrated into the monitoring setup over configuration specified either via ENV, mounted YAML files or a PostgreSQL Config DB. See the Component reuse chapter for wiring details.
"},{"location":"tutorial/docker_installation.html#building-custom-docker-images","title":"Building custom Docker images","text":"For custom tweaks, more security, specific component versions, etc. one could easily build the images themselves, just a Docker installation is needed.
"},{"location":"tutorial/docker_installation.html#interacting-with-the-docker-container","title":"Interacting with the Docker container","text":"If launched with the PW_TESTDB=1
env. parameter then the pgwatch configuration database running inside Docker is added to the monitoring, so that you should immediately see some metrics at least on the Health-check dashboard.
To add new databases / instances to monitoring open the administration Web interface on port 8080 (or some other port, if re-mapped at launch) and go to the SOURCES page. Note that the Web UI is an optional component, and one can manage monitoring entries directly in the Postgres Config DB via INSERT
/ UPDATE
into \"pgwatch.monitored_db\"
table. Default user/password are again pgwatch/pgwatchadmin
, database name - pgwatch
. In both cases note that it can take up to 2min (default main loop time, changeable via PW_SERVERS_REFRESH_LOOP_SECONDS
) before you see any metrics for newly inserted databases.
One can edit existing or create new Grafana dashboards, change Grafana global settings, create users, alerts, etc. after logging in as pgwatch/pgwatchadmin
(by default, changeable at launch time).
Metrics and their intervals that are to be gathered can be customized for every database separately via a custom JSON config field or more conveniently by using Preset Configs, like \"minimal\", \"basic\" or \"exhaustive\" (monitored_db.preset_config
table), where the name should already hint at the amount of metrics gathered. For privileged users the \"exhaustive\" preset is a good starting point, and \"unprivileged\" for simple developer accounts.
To add new metrics yourself (which are simple SQL queries returning any values and a timestamp) head to http://127.0.0.1:8080/metrics. The queries should always include a \"epoch_ns\"
column and \"tag\\_\"
prefix can be used for columns that should be quickly searchable/groupable, and thus will be indexed with the PostgreSQL metric stores. See to the bottom of the \"metrics\" page for more explanations or the documentation chapter on metrics.
For a quickstart on dashboarding, a list of available metrics together with some instructions are presented on the \"Documentation\" dashboard.
Some built-in metrics like \"cpu_load\"
and others, that gather privileged or OS statistics, require installing helper functions (looking like that), so it might be normal to see some blank panels or fetching errors in the logs. On how to prepare databases for monitoring see the Monitoring preparations chapter.
For effective graphing you want to familiarize yourself with the query language of the database system that was selected for metrics storage. Some tips to get going:
max() - min()
aggregates on cumulative counters (most data provided by Postgres is cumulative) would lie.For possible troubleshooting needs, logs of the components running inside Docker are by default (if not disabled on container launch) visible under: http://127.0.0.1:8080/logs/%5Bpgwatch%7Cpostgres%7Cwebui%7Cgrafana. It's of course also possible to log into the container and look at log files directly - they're situated under /var/logs/supervisor/
.
FYI - docker logs ...
command is not really useful after a successful container startup in pgwatch case.
As mentioned in the Components chapter, remember that the pre-built Docker images are just one example how your monitoring setup around the pgwatch metrics collector could be organized. For another example how various components (as Docker images here) can work together, see a Docker Compose example with loosely coupled components here.
"},{"location":"tutorial/docker_installation.html#example-of-advanced-setup-using-yaml-files-and-dual-sinks","title":"Example of advanced setup using YAML files and dual sinks:","text":"pgwatch service in file docker/docker-compose.yml
can look like this:
pgwatch:\n image: cybertecpostgresql/pgwatch:latest\n command:\n - \"--web-disable=true\"\n - \"--sources=/sources.yaml\"\n - \"--sink=postgresql://pgwatch@postgres:5432/pgwatch_metrics\"\n - \"--sink=prometheus://:8080\"\n volumes:\n - \"./sources.yaml:/sources.yaml\"\n ports:\n - \"8080:8080\"\n depends_on:\n postgres:\n condition: service_healthy\n
Source file sources.yaml
in the same directory:
- name: demo\n  conn_str: postgresql://pgwatch:pgwatchadmin@postgres/pgwatch\n  preset_metrics: exhaustive\n  is_enabled: true\n  group: default\n
Running this setup you get pgwatch that uses sources from YAML file and outputs measurements to postgres DB and exposes them for Prometheus to scrape on port 8080 instead of WebUI (which is disabled by --web-disable
). Metrics definition are built-in, you can examine definition in internal/metrics/metrics.yaml
.
As a base requirement you'll need a login user (non-superuser suggested) for connecting to your server and fetching metrics.
Theoretically you can use any username you like, but if not using \"pgwatch\" you need to adjust the \"helper\" creation SQL scripts (see below for explanation) accordingly, as in those by default the \"pgwatch\" will be granted execute privileges.
CREATE ROLE pgwatch WITH LOGIN PASSWORD 'secret';\n-- For critical databases it might make sense to ensure that the user account\n-- used for monitoring can only open a limited number of connections\n-- (there are according checks in code, but multiple instances might be launched)\nALTER ROLE pgwatch CONNECTION LIMIT 3;\nGRANT pg_monitor TO pgwatch;\nGRANT CONNECT ON DATABASE mydb TO pgwatch;\nGRANT EXECUTE ON FUNCTION pg_stat_file(text) to pgwatch; -- for wal_size metric\n
For most monitored databases it's extremely beneficial (for troubleshooting performance issues) to also activate the pg_stat_statements extension which will give us exact \"per query\" performance aggregates and also enables to calculate how many queries are executed per second for example. In pgwatch context it powers the \"Stat statements Top\" dashboard and many other panels of other dashboards. For additional troubleshooting benefits also the track_io_timing setting should be enabled.
Make sure the Postgres contrib package is installed (should be installed automatically together with the Postgres server package on Debian based systems).
yum install -y postgresqlXY-contrib
apt install postgresql-contrib
Add pg_stat_statements
to your server config (postgresql.conf) and restart the server.
shared_preload_libraries = 'pg_stat_statements'\ntrack_io_timing = on\n
After restarting activate the extension in the monitored DB. Assumes Postgres superuser.
psql -c \"CREATE EXTENSION IF NOT EXISTS pg_stat_statements\"\n
Helper functions in pgwatch context are standard Postgres stored procedures, running under SECURITY DEFINER
privileges. Via such wrapper functions one can do controlled privilege escalation - i.e. to give access to protected Postgres metrics (like active session details, \"per query\" statistics) or even OS-level metrics, to normal unprivileged users, like the pgwatch monitoring role.
If using a superuser login (recommended only for local \"push\" setups) you have full access to all Postgres metrics and would need helpers only for OS remote statistics. For local (push) setups as of pgwatch version 1.8.4 the most typical OS metrics are covered by the --direct-os-stats
flag, explained below.
For unprivileged monitoring users it is highly recommended to take these additional steps on the \"to be monitored\" database to get maximum value out of pgwatch in the long run. Without these additional steps, you lose though about 10-15% of built-in metrics, which might not be too tragic nevertheless. For that use case there's also a preset config named \"unprivileged\".
When monitoring v10+ servers then the built-in pg_monitor system role is recommended for the monitoring user, which almost substitutes superuser privileges for monitoring purposes in a safe way.
"},{"location":"tutorial/preparing_databases.html#rolling-out-common-helpers","title":"Rolling out common helpers","text":"For completely unprivileged monitoring users the following helpers are recommended to make good use of the default \"exhaustive\" Preset Config:
export PGUSER=superuser\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_activity/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_replication/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_wal_size/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_statements/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_sequences/$pgver/metric.sql mydb\n
Note that there might not be an exact Postgres version match for helper definitions - then replace \$pgver with the closest available version number below your server's Postgres version number.
Also note that as of v1.8.1 some helper definition SQL scripts (like for \"get_stat_statements\") will also inspect the \"search_path\" and by default will not install into schemas that have PUBLIC CREATE privileges, like the \"public\" schema by default has!
Also when rolling out helpers make sure the search_path
is at defaults or set so that it's also accessible for the monitoring role as currently neither helpers nor metric definition SQLs assume any particular schema and they depend on the search_path
including everything needed.
For more detailed statistics (OS monitoring, table bloat, WAL size, etc.) it is recommended to install also all other helpers found from the /etc/pgwatch/metrics/00_helpers
folder or do it automatically by using the rollout_helper.py script found in the 00_helpers folder.
As of v1.6.0 though helpers are not needed for Postgres-native metrics (e.g. WAL size) if a privileged user (superuser or pg_monitor GRANT) is used, as pgwatch now supports having 2 SQL definitions for each metric - \"normal / unprivileged\" and \"privileged\" / \"superuser\". In the file system /etc/pgwatch/metrics such \"privileged\" access definitions will have a \"_su\" added to the file name.
"},{"location":"tutorial/preparing_databases.html#automatic-rollout-of-helpers","title":"Automatic rollout of helpers","text":"pgwatch can roll out helpers also automatically on the monitored DB. This requires superuser privileges and a configuration attribute for the monitored DB. In YAML config mode it's called is_superuser, in Config DB md_is_superuser, in the Web UI one can tick the \"Auto-create helpers\" checkbox.
After the automatic rollout it's still generally recommended to remove the superuser privileges from the monitoring role, which now should have GRANTs to all automatically created helper functions. Note though that all created helpers will not be immediately usable as some are for special purposes and need additional dependencies.
A hint: if it can be foreseen that a lot of databases will be created on some instance (generally not a good idea though) it might be a good idea to roll out the helpers directly in the template1 database - so that all newly created databases will get them automatically.
"},{"location":"tutorial/preparing_databases.html#plpython-helpers","title":"PL/Python helpers","text":"PostgreSQL in general is implemented in such a way that it does not know too much about the operating system that it is running on. This is a good thing for portability but can be somewhat limiting for monitoring, especially when there is no system monitoring framework in place or the data is not conveniently accessible together with metrics gathered from Postgres. To overcome this problem, users can also choose to install helpers extracting OS metrics like CPU, RAM usage, etc. so that this data is stored together with Postgres-native metrics for easier graphing / correlation / alerting. This also enables you to be totally independent of any System Monitoring tools like Zabbix, etc., with the downside that everything is gathered over Postgres connections so that when Postgres is down no OS metrics can be gathered either. Since v1.8.4 though the latter problem can be reduced for local \"push\" based setups via the --direct-os-stats
option plus according metrics configuration (e.g. the \"full\" preset).
Note though that PL/Python is usually disabled by DB-as-a-service providers like AWS RDS for security reasons.
# first install the Python bindings for Postgres\napt install postgresql-plpython3-XY\n# yum install postgresqlXY-plpython3\n\npsql -c \"CREATE EXTENSION plpython3u\"\npsql -f /etc/pgwatch/metrics/00_helpers/get_load_average/9.1/metric.sql mydb\n\n# psutil helpers are only needed when full set of common OS metrics is wanted\napt install python3-psutil\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_cpu/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_mem/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_disk/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_disk_io_total/9.1/metric.sql mydb\n
Note that we're assuming here that we're on a modern Linux system with Python 3 as default. For older systems Python 3 might not be an option though, so you need to change plpython3u to plpythonu and also do the same replace inside the code of the actual helper functions! Here the rollout_helper.py script with its --python2
flag can be helpful again.
pg_monitor
, that are exactly meant to be used for such cases where we want to give access to all Statistics Collector views without any other \"superuser behaviour\". See here for documentation on such special system roles. Note that currently most out-of-the-box metrics first rely on the helpers as v10 is relatively new still, and only when fetching fails, direct access with the \"Privileged SQL\" is tried.--direct-os-stats
parameter to signal that we can fetch the data for the default psutil*
metrics directly from OS counters. If direct OS fetching fails though, the fallback is still to try via PL/Python wrappers.pg_upgrade
, this could result in error messages thrown. Then just drop those failing helpers on the \"to be upgraded\" cluster and re-create them after the upgrade process.As mentioned above, helper / wrapper functions are not strictly needed, they just provide a bit more information for unprivileged users - thus for developers with no means to install any wrappers as superuser, it's also possible to benefit from pgwatch - for such use cases e.g. the \"unprivileged\" preset metrics profile and the according \"DB overview Unprivileged / Developer\" are a good starting point as it only assumes existence of pg_stat_statements
(which should be available by all cloud providers).
When adding a new \"to be monitored\" entry a source type needs to be selected. Following types are available:
"},{"location":"tutorial/preparing_databases.html#postgres","title":"postgres","text":"Monitor a single database on a single Postgres instance. When using the Web UI and the \"DB name\" field is left empty, there's a one-time operation where all non-template DB names are fetched, prefixed with the \"Unique name\" field value and added to monitoring (if not already monitored). Internally monitoring always happens \"per DB\" not \"per cluster\" though.
"},{"location":"tutorial/preparing_databases.html#postgres-continuous-discovery","title":"postgres-continuous-discovery","text":"Monitor a whole Postgres cluster / instance (or a subset of its DBs). Host information without a DB name needs to be specified and then the pgwatch daemon will periodically scan the cluster and add any found and not yet monitored DBs to monitoring. In this mode it's also possible to specify regular expressions to include/exclude some database names.
"},{"location":"tutorial/preparing_databases.html#pgbouncer","title":"pgbouncer","text":"Use to track metrics from PgBouncer's SHOW STATS
command. In place of the Postgres \"DB name\" the name of the PgBouncer \"pool\" to be monitored must be inserted.
Use to track joint metrics from Pgpool2's SHOW POOL_NODES
and SHOW POOL_PROCESSES
commands. Pgpool2 from version 3.0 is supported.
Patroni is a HA / cluster manager for Postgres that relies on a DCS (Distributed Consensus Store) to store its state. Typically, in such a setup the nodes come and go, and also it should not matter who is currently the master. To make it easier to monitor such dynamic constellations pgwatch supports reading of cluster node info from all supported DCSs (etcd, Zookeeper, Consul), but currently only for simpler cases with no security applied (which is actually the common case in a trusted environment).
"},{"location":"tutorial/preparing_databases.html#patroni-continuous-discovery","title":"patroni-continuous-discovery","text":"As normal patroni DB type but all DBs (or only those matching the regex if any provided) are monitored.
"},{"location":"tutorial/preparing_databases.html#patroni-namespace-discovery","title":"patroni-namespace-discovery","text":"Similar to patroni-continuous-discovery but all Patroni scopes (clusters) of an ETCD namespace are automatically monitored. Optionally regexes on database names still apply if provided.
Notice
All \"continuous\" modes expect access to \"template1\" or \"postgres\" databases of the specified cluster to determine the database names residing there.
"},{"location":"tutorial/upgrading.html","title":"Upgrading","text":"The pgwatch daemon code doesn't need too much maintenance itself (if you're not interested in new features), but the preset metrics, dashboards and the other components that pgwatch relies on, like Grafana, are under very active development and get updates quite regularly so already purely from the security standpoint it would make sense to stay up to date.
We also regularly include new component versions in the Docker images after verifying that they work. If using Docker, you could also choose to build your own images any time some new component versions are released, just increment the version numbers in the Dockerfile.
"},{"location":"tutorial/upgrading.html#updating-to-a-newer-docker-version","title":"Updating to a newer Docker version","text":""},{"location":"tutorial/upgrading.html#without-volumes","title":"Without volumes","text":"If pgwatch container was started in the simplest way possible without volumes, and if previously gathered metrics are not of great importance, and there are no user modified metric or dashboard changes that should be preserved, then the easiest way to get the latest components would be just to launch new container and import the old monitoring config:
# let's backup up the monitored hosts\npsql -p5432 -U pgwatch -d pgwatch -c \"\\copy monitored_db to 'monitored_db.copy'\"\n\n# stop the old container and start a new one ...\ndocker stop ... && docker run ....\n\n# import the monitored hosts\npsql -p5432 -U pgwatch -d pgwatch -c \"\\copy monitored_db from 'monitored_db.copy'\"\n
If metrics data and other settings like custom dashboards need to be preserved then some more steps are needed, but basically it's about pulling Postgres backups and restoring them into the new container.
A tip: to make the restore process easier it would already make sense to mount the host folder with the backups in it on the new container with \"-v \\~/pgwatch_backups:/pgwatch_backups:rw,z\"
when starting the Docker image. Otherwise, one needs to set up SSH or use something like S3 for example. Also note that port 5432 needs to be exposed to take Postgres backups outside of Docker.
To make updates a bit easier, the preferred way to launch pgwatch should be to use Docker volumes for each individual component - see the Installing using Docker chapter for details. Then one can just stop the old container and start a new one, re-using the volumes.
With some releases though, updating to newer version might additionally still require manual rollout of Config DB schema migrations scripts, so always check the release notes for hints on that or just go to the \"pgwatch/sql/migrations\"
folder and execute all SQL scripts that have a higher version than the old pgwatch container. Error messages like \"missing columns\" or \"wrong datatype\" will also hint at that, after launching with a new image. FYI - such SQL \"patches\" are generally not provided for metric updates, nor dashboard changes, and they need to be updated separately.
For a custom installation there's quite some freedom in doing updates - as components (Grafana, PostgreSQL) are loosely coupled, they can be updated any time without worrying too much about the other components. Only \"tightly coupled\" components are the pgwatch metrics collector, config DB and the optional Web UI - if the pgwatch config is kept in the database. If YAML based approach is used, then things are even more simple - the pgwatch daemon can be updated any time as YAML schema has default values for everything and there are no other \"tightly coupled\" components like the Web UI.
"},{"location":"tutorial/upgrading.html#updating-grafana","title":"Updating Grafana","text":"The update process for Grafana looks pretty much like the installation so take a look at the according chapter. If using Grafana package repository it should happen automatically along with other system packages. Grafana has a built-in database schema migrator, so updating the binaries and restarting is enough.
"},{"location":"tutorial/upgrading.html#updating-grafana-dashboards","title":"Updating Grafana dashboards","text":"There are no update or migration scripts for the built-in Grafana dashboards as it would break possible user applied changes. If you know that there are no user changes, then one can just delete or rename the existing ones in bulk and import the latest JSON definitions. See here for some more advice on how to manage dashboards.
"},{"location":"tutorial/upgrading.html#updating-the-config-metrics-db-version","title":"Updating the config / metrics DB version","text":"Database updates can be quite complex, with many steps, so it makes sense to follow the manufacturer's instructions here.
For PostgreSQL one should distinguish between minor version updates and major version upgrades. Minor updates are quite straightforward and problem-free, consisting of running something like:
apt update && apt install postgresql\nsudo systemctl restart postgresql\n
For PostgreSQL major version upgrades one should read through the according release notes (e.g. here) and be prepared for the unavoidable downtime.
"},{"location":"tutorial/upgrading.html#updating-the-pgwatch-schema","title":"Updating the pgwatch schema","text":"This is the pgwatch specific part, with some coupling between the following components - Config DB SQL schema, metrics collector, and the optional Web UI.
Here one should check from the CHANGELOG if pgwatch schema needs updating. If yes, then manual applying of schema diffs is required before running the new gatherer or Web UI. If no, i.e. no schema changes, all components can be updated independently in random order.
pgwatch --config=postgresql://localhost/pgwatch --upgrade\n
"},{"location":"tutorial/upgrading.html#updating-the-metrics-collector","title":"Updating the metrics collector","text":"Compile or install the gatherer from RPM / DEB / tarball packages. See the Custom installation chapter for details.
If using a SystemD service file to auto-start the collector then you might want to also check for possible updates on the template there - /etc/pgwatch/startup-scripts/pgwatch.service
.
In the YAML mode you always get new SQL definitions for the built-in metrics automatically when refreshing the sources via GitHub or pre-built packages, but with Config DB approach one needs to do it manually. Given that there are no user added metrics, it's simple enough though - just delete all old ones and re-insert everything from the latest metric definition SQL file.
pg_dump -t pgwatch.metric pgwatch > old_metric.sql # a just-in-case backup\npsql -c \"truncate pgwatch.metric\" pgwatch\npsql -f /etc/pgwatch/sql/config_store/metric_definitions.sql pgwatch\n
Warning
If you have added some own custom metrics be sure not to delete or truncate them!
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"index.html","title":"Hello world!","text":"pgwatch is a flexible PostgreSQL-specific monitoring solution, offering a comprehensive view of database performance and health. It provides a user-friendly interface through Grafana dashboards, allowing users to easily inspect various metrics and trends.
In the world of database management, monitoring plays a crucial role in ensuring stability, performance, and security. With a constant need to keep databases healthy and responsive, pgwatch answers three fundamental questions:
"},{"location":"index.html#what","title":"What?","text":"What sources to monitor?
pgwatch is designed specifically for monitoring PostgreSQL databases and related infrastructure. It covers a wide range of components crucial for PostgreSQL ecosystems, including:
This extended monitoring capability allows you to gain a comprehensive view of not only your PostgreSQL databases but also the surrounding infrastructure that supports and enhances your database operations.
"},{"location":"index.html#how","title":"How?","text":"What metrics are available for monitoring?
pgwatch provides out-of-the-box support for almost all essential PostgreSQL metrics, including:
In addition to the standard metrics, pgwatch can be easily extended to monitor custom metrics based on your specific needs. The solution offers flexibility to fine-tune monitoring details and the aggressiveness of data collection.
"},{"location":"index.html#where","title":"Where?","text":"Where are the measurements stored and where can users inspect the dashboards?
For a detailed list of all features and capabilities, please refer to the Features page.
"},{"location":"concept/components.html","title":"Components","text":"The main development idea around pgwatch was to do the minimal work needed and not to reinvent the wheel - meaning that pgwatch is mostly just about gluing together already some proven pieces of software for metrics storage and using Grafana for dashboarding. So here is a listing of components that can be used to build up a monitoring setup around the pgwatch metrics collector. Note that most components are not mandatory and for tasks like metrics storage there are many components to choose from.
"},{"location":"concept/components.html#the-metrics-gathering-daemon","title":"The metrics gathering daemon","text":"The metrics collector, written in Go, is the only mandatory and most critical component of the whole solution. The main task of the pgwatch collector / daemon is pretty simple - reading the configuration and metric definitions, fetching the metrics from the configured databases using the configured connection info and finally storing the metric measurements to some other database, or just exposing them over a port for scraping in case of Prometheus mode.
"},{"location":"concept/components.html#configuration","title":"Configuration","text":"The configuration says which databases, how often and with which metrics (SQL queries) are to be gathered. There are 2 options to store the configuration:
Many options here so that one can for example go for maximum storage effectiveness or pick something where they already know the query language:
"},{"location":"concept/components.html#postgresql","title":"PostgreSQL","text":"PostgreSQL is the world's most advanced Open Source RDBMS.
Postgres storage is based on the JSONB datatype so minimally version 9.4+ is required, but for bigger setups where partitioning is a must, v11+ is needed. Any already existing Postgres database will do the trick, see the Bootstrapping the Metrics DB section for details.
"},{"location":"concept/components.html#timescaledb","title":"TimescaleDB","text":"TimescaleDB is a time-series extension for PostgreSQL.
Although technically a plain extension it's often mentioned as a separate database system as it brings custom data compression to the table, enabling huge disk savings over standard Postgres. Note that pgwatch does not use Timescale's built-in retention management but a custom version.
"},{"location":"concept/components.html#prometheus","title":"Prometheus","text":"Prometheus is a time series database and monitoring system.
Though Prometheus is not a traditional database system, it's a good choice for monitoring Cloud-like environments as the monitoring targets don't need to know too much about how actual monitoring will be carried out later and also Prometheus has a nice fault-tolerant alerting system for enterprise needs. By default, Prometheus is not set up for long term metrics storage!
"},{"location":"concept/components.html#json-files","title":"JSON files","text":"Plain text files for testing / special use cases.
"},{"location":"concept/components.html#the-web-ui","title":"The Web UI","text":"The second homegrown component of the pgwatch solution is an optional and relatively simple Web UI for administering details of the monitoring configuration like which databases should be monitored, with which metrics and intervals. Besides that there are some basic overview tables to analyze the gathered data and also possibilities to delete unneeded metric data (when removing a test host for example).
"},{"location":"concept/components.html#metrics-representation","title":"Metrics representation","text":"Standard pgwatch setup uses Grafana for analyzing the gathered metrics data in a visual, point-and-click way. For that a rich set of predefined dashboards for Postgres is provided, that should cover the needs of most users - advanced users would mostly always want to customize some aspects though, so it's not meant as a one-size-fits-all solution. Also as metrics are stored in a DB, they can be visualized or processed in any other way.
"},{"location":"concept/components.html#component-diagram","title":"Component diagram","text":"All components are loosely coupled, thus for non-pgwatch components (pgwatch components are only the metrics collector) you can decide to make use of an already existing installation of Postgres, Grafana or Prometheus and run additionally just the pgwatch collector.
"},{"location":"concept/installation_options.html","title":"Installation options","text":"Besides freedom of choosing from a set of metric measurements storage options one can also choose how the monitoring configuration (connect strings, metric sets and intervals) is going to be stored.
"},{"location":"concept/installation_options.html#configuration-database-based-operation","title":"Configuration database based operation","text":"This is the original central pull mode depicted on the architecture diagram. It requires a small schema to be rolled out on any Postgres database accessible to the metrics gathering daemon, which will hold the connect strings, metric definition SQLs and preset configurations and some other more minor attributes. For rollout details see the custom installation chapter.
The default Docker demo image cybertecpostgresql/pgwatch-demo
uses this approach.
One can deploy the gatherer daemon(s) decentralized with sources to be monitored defined in simple YAML files. In that case there is no need for the central Postgres configuration database. See the sample.sources.yaml config file for an example.
Note
In this mode you may also want, but are not forced, to point out the path to the metric definition YAML file when starting the gatherer. Also note that the configuration system supports multiple YAML files in a folder so that you could easily programmatically manage things via Ansible, for example, and you can also use environment variables inside YAML files.
"},{"location":"concept/long_term_installations.html","title":"Long term installations","text":"For long term pgwatch setups the main challenge is to keep the software up-to-date to guarantee stable operation and also to make sure that all DBs are under monitoring.
"},{"location":"concept/long_term_installations.html#keeping-inventory-in-sync","title":"Keeping inventory in sync","text":"Adding new DBs to monitoring and removing those shut down, can become a problem if teams are big, databases are many, and it's done by hand (common for on-premise, non-orchestrated deployments). To combat that, the most typical approach would be to write some script or Cronjob that parses the company's internal inventory database, files or endpoints and translate changes to according CRUD operations on the pgwatch.source table directly.
One could also use the REST API for that purpose.
If pgwatch configuration is kept in YAML files, it should be also relatively easy to automate the maintenance as the configuration can be organized so that one file represent a single monitoring entry, i.e. the --sources
and --metrics
parameters can also refer to a folder of YAML files.
The pgwatch metrics gathering daemon is the core component of the solution and also the most critical one. So it's definitely recommended to update it at least once per year or minimally when some freshly released Postgres major version instances are added to monitoring. New Postgres versions don't necessarily mean that something will break, but you'll be missing some newly added metrics, plus the occasional optimizations. See the upgrading chapter for details, but basically the process is very similar to initial installation as the collector doesn't have any state on its own - it's just one executable file.
"},{"location":"concept/long_term_installations.html#metrics-maintenance","title":"Metrics maintenance","text":"Metric definition SQLs are regularly corrected as suggestions and improvements come in and also new ones are added to cover latest Postgres versions, so would make sense to refresh them 1-2x per year.
If using built-in metrics, just installing newer pre-built RPM / DEB packages will do the trick automatically but for configuration database based setups you'd need to follow a simple process described here.
"},{"location":"concept/long_term_installations.html#dashboard-maintenance","title":"Dashboard maintenance","text":"Same as with metrics, the built-in Grafana dashboards are also being actively updated, so it would make sense to refresh them occasionally as well. You could manually just re-import some dashboards of interest from JSON files in [/etc/pgwatch/grafana-dashboards] folder or from GitHub.
Info
Notable new dashboards are usually listed also in release notes and most dashboards also have a sample screenshots available.
"},{"location":"concept/long_term_installations.html#storage-monitoring","title":"Storage monitoring","text":"In addition to all that you should at least initially periodically monitor the metric measurements databases size as it can grow quite a lot (especially when using Postgres for storage) when the monitored databases have hundreds of tables and indexes, and if a lot of unique SQLs are used and pg_stat_statements
monitoring is enabled. If the storage grows too fast, one can increase the metric intervals (especially for \"table_stats\", \"index_stats\" and \"stat_statements\") or decrease the data retention periods via --retention
param.
For easy configuration management (adding databases to monitoring, adding metrics) there is a Web application bundled.
Besides managing the metrics gathering configurations, the two other useful features for the Web UI would be the possibility to look at the logs.
Default port: 8080
Sample screenshot of the Web UI:
"},{"location":"concept/web_ui.html#web-ui-security","title":"Web UI security","text":"By default, the Web UI is not secured - anyone can view and modify the monitoring configuration. If some security is needed though it can be enabled:
HTTPS
Password protection is controlled by --web-user
, --web-password
command-line parameters or PW_WEBUSER
, PW_WEBPASSWORD
environmental variables.
Note
It's better to use standard LibPQ .pgpass files so there's no requirement to store any passwords in pgwatch config database or YAML config file.
For security sensitive environments make sure to always deploy password protection together with SSL, as it uses standard cookie-based techniques vulnerable to snooping / MITM attacks.
"},{"location":"developer/CODE_OF_CONDUCT.html","title":"Citizen Code of Conduct","text":""},{"location":"developer/CODE_OF_CONDUCT.html#1-purpose","title":"1. Purpose","text":"A primary goal of pgwatch is to be inclusive to the largest number of contributors, with the most varied and diverse backgrounds possible. As such, we are committed to providing a friendly, safe and welcoming environment for all, regardless of gender, sexual orientation, ability, ethnicity, socioeconomic status, and religion (or lack thereof).
This code of conduct outlines our expectations for all those who participate in our community, as well as the consequences for unacceptable behavior.
We invite all those who participate in pgwatch to help us create safe and positive experiences for everyone.
"},{"location":"developer/CODE_OF_CONDUCT.html#2-open-sourceculturetech-citizenship","title":"2. Open [Source/Culture/Tech] Citizenship","text":"A supplemental goal of this Code of Conduct is to increase open [source/culture/tech] citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community.
Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society.
If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, we want to know.
"},{"location":"developer/CODE_OF_CONDUCT.html#3-expected-behavior","title":"3. Expected Behavior","text":"The following behaviors are expected and requested of all community members:
The following behaviors are considered harassment and are unacceptable within our community:
No weapons will be allowed at pgwatch events, community spaces, or in other spaces covered by the scope of this Code of Conduct. Weapons include but are not limited to guns, explosives (including fireworks), and large knives such as those used for hunting or display, as well as any other item used for the purpose of causing injury or harm to others. Anyone seen in possession of one of these items will be asked to leave immediately, and will only be allowed to return without the weapon. Community members are further expected to comply with all state and local laws on this matter.
"},{"location":"developer/CODE_OF_CONDUCT.html#6-consequences-of-unacceptable-behavior","title":"6. Consequences of Unacceptable Behavior","text":"Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated.
Anyone asked to stop unacceptable behavior is expected to comply immediately.
If a community member engages in unacceptable behavior, the community organizers may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event).
"},{"location":"developer/CODE_OF_CONDUCT.html#7-reporting-guidelines","title":"7. Reporting Guidelines","text":"If you are subject to or witness unacceptable behavior, or have any other concerns, please notify a community organizer as soon as possible. Pavlo Golub.
Additionally, community organizers are available to help community members engage with local law enforcement or to otherwise help those experiencing unacceptable behavior feel safe. In the context of in-person events, organizers will also provide escorts as desired by the person experiencing distress.
"},{"location":"developer/CODE_OF_CONDUCT.html#8-addressing-grievances","title":"8. Addressing Grievances","text":"If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify cybertec-postgresql with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies.
"},{"location":"developer/CODE_OF_CONDUCT.html#9-scope","title":"9. Scope","text":"We expect all community participants (contributors, paid or otherwise; sponsors; and other guests) to abide by this Code of Conduct in all community venues--online and in-person--as well as in all one-on-one communications pertaining to community business.
This code of conduct and its related procedures also applies to unacceptable behavior occurring outside the scope of community activities when such behavior has the potential to adversely affect the safety and well-being of community members.
"},{"location":"developer/CODE_OF_CONDUCT.html#10-contact-info","title":"10. Contact info","text":"Pavlo Golub Cybertec
"},{"location":"developer/CODE_OF_CONDUCT.html#11-license-and-attribution","title":"11. License and attribution","text":"The Citizen Code of Conduct is distributed by Stumptown Syndicate under a Creative Commons Attribution-ShareAlike license.
Portions of text derived from the Django Code of Conduct and the Geek Feminism Anti-Harassment Policy.
Revision 2.3. Posted 6 March 2017.
Revision 2.2. Posted 4 February 2016.
Revision 2.1. Posted 23 June 2014.
Revision 2.0, adopted by the Stumptown Syndicate board on 10 January 2013. Posted 17 March 2013.
"},{"location":"developer/LICENSE.html","title":"License","text":"BSD 3-Clause License
Copyright (c) 2022, CYBERTEC PostgreSQL International GmbH All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"},{"location":"developer/contributing.html","title":"Contributing to PGWatch","text":"Thank you for considering contributing to PGWatch! Here are some guidelines to help you get started.
"},{"location":"developer/contributing.html#code-of-conduct","title":"Code of Conduct","text":"Please read and follow our Code of Conduct.
"},{"location":"developer/contributing.html#communication-channels","title":"Communication Channels","text":"The main communication channel for the project is the GitHub repository. Feel free to open issues and participate in discussions there.
"},{"location":"developer/contributing.html#setting-up-the-development-environment","title":"Setting Up the Development Environment","text":"To set up the development environment, please refer to the instructions in the README.md file. We use Docker Compose to simplify the setup process.
"},{"location":"developer/contributing.html#how-to-report-bugs","title":"How to Report Bugs","text":"If you encounter any bugs, please report them by opening an issue in the GitHub repository issues section. Provide as much detail as possible to help us understand and resolve the issue.
"},{"location":"developer/contributing.html#how-to-request-features","title":"How to Request Features","text":"If you have a feature request, please start a discussion in the GitHub repository discussions section. We value your feedback and ideas!
"},{"location":"developer/contributing.html#submitting-changes","title":"Submitting Changes","text":"Before submitting any changes, please discuss them in the GitHub repository discussions section. This helps ensure that your contribution aligns with the project goals and prevents duplicate efforts.
When you are ready to submit your changes, create a pull request. Make sure your pull request:
We follow the Go Style Guide. Please ensure your code adheres to these guidelines.
"},{"location":"developer/contributing.html#testing","title":"Testing","text":"We require tests for all changes. Please use the standard Go testing facilities. Ensure that all tests pass before submitting your pull request.
"},{"location":"developer/contributing.html#documentation","title":"Documentation","text":"Documentation for the project resides in the same repository. If you make changes that require documentation updates, please include those changes in your pull request.
"},{"location":"developer/contributing.html#contributor-license-agreement-cla","title":"Contributor License Agreement (CLA)","text":"We do not require contributors to sign a Contributor License Agreement (CLA). By submitting a pull request, you agree that your contributions are submitted under the same license as the project.
We appreciate your contributions and efforts to improve PGWatch. If you have any questions, feel free to reach out through the GitHub repository.
Thank you!
"},{"location":"developer/godoc.html","title":"Documentation","text":""},{"location":"gallery/dashboards.html","title":"Dashboards","text":"Dashboards are a collection of visualizations that are displayed in a single page. They are useful for monitoring and analyzing data.
Health Check
Global Health
Biggest Tables Treemap
Checkpointer, Background Writer, I/O statistics
Indexes Overview
Database Overview With Time Lag Comparison
Database Overview for Developers (Unprivileged)
Global Databases Overview
Change Events
PostgreSQL Versions Overview
Recommendations
Replication Lag
Server Log Events
Realtime Execution Plans
Stat Activity Realtime
Stat Statements SQL Search
Stat Statements Top Visual
Stat Statements Top
System Statistics
Tables Top
"},{"location":"gallery/webui.html","title":"Web User Interface","text":"The Web User Interface (WebUI) allows you to interact with pgwatch and control monitored sources, metrics and presets definitions, and view logs.
Sources
Metrics
Presets
Logs
"},{"location":"howto/dashboarding_alerting.html","title":"Grafana intro","text":"To display the gathered and stored metrics the pgwatch project has decided to rely heavily on the popular Grafana dashboarding solution. This means only though that it's installed in the default Docker images and there's a set of predefined dashboards available to cover most of the metrics gathered via the Preset Configs.
This does not mean though that Grafana is in any way tightly coupled with project's other components - quite the opposite actually, one can use any other means / tools to use the metrics data gathered by the pgwatch daemon.
Currently, there are around 30 preset dashboards available for PostgreSQL data sources. Due to that nowadays, if metric gathering volumes are not a problem, we recommend using Postgres storage for most users.
Note though that most users will probably want to always adjust the built-in dashboards slightly (colors, roundings, etc.), so that they should be taken only as examples to quickly get started. Also note that in case of changes it's not recommended to change the built-in ones, but use the Save as features - this will allow later to easily update all the dashboards en masse per script, without losing any custom user changes.
Links:
Built-in dashboards for PostgreSQL (TimescaleDB) storage
Screenshots of pgwatch default dashboards
The online Demo site
"},{"location":"howto/dashboarding_alerting.html#alerting","title":"Alerting","text":"Alerting is very conveniently also supported by Grafana in a simple point-and-click style - see here for the official documentation. In general all more popular notification services are supported, and it's pretty much the easiest way to quickly start with PostgreSQL alerting on a smaller scale. For enterprise usage with hundreds of instances it might get too \"clicky\" though and there are also some limitations - currently you can set alerts only on Graph panels and there must be no variables used in the query so you cannot use most of the pre-created pgwatch graphs, but need to create your own.
Nevertheless, alerting via Grafana is a good option for lighter use cases and there's also a preset dashboard template named \"Alert Template\" from the pgwatch project to give you some ideas on what to alert on.
Note though that alerting is always a bit of a complex topic - it requires good understanding of PostgreSQL operational metrics and also business criticality background infos, so we don't want to be too opinionated here, and it's up to the users to implement.
"},{"location":"howto/metrics_db_bootstrap.html","title":"Choosing a Database","text":"pgwatch supports multiple databases for storing metrics measurements. The following databases are supported:
We will use PostgreSQL in this guide. But the steps are similar for other databases. It's up to you to choose the database that best fits your needs and set it up accordingly.
"},{"location":"howto/metrics_db_bootstrap.html#creating-the-database","title":"Creating the Database","text":"First, we need to create a database for storing the metrics measurements. We will use the psql
command-line tool to create the database. You can also use a GUI tool like pgAdmin to create the database.
Let's assume we want to create a database named measurements
on a completely fresh PostgreSQL installation. It is wise to use a special role for the metrics database, so we will create a role named pgwatch
and assign it to the measurements
database.
$ psql -U postgres -h 10.0.0.42 -p 5432 -d postgres\npsql (17.2)\n\npostgres=# CREATE ROLE pgwatch WITH LOGIN PASSWORD 'pgwatchadmin';\nCREATE ROLE\n\npostgres=# CREATE DATABASE measurements OWNER pgwatch;\nCREATE DATABASE\n
That's it! We have created a database named measurements
with the owner pgwatch
. Now we can proceed to the next step.
pgwatch will automatically create the necessary tables and indexes in the database when it starts. You don't need to create any tables or indexes manually.
You can now configure pgwatch to use the measurements
database as the sink for storing metrics measurements.
$ pgwatch --sources=/etc/sources.yaml --sink=postgresql://pgwatch@10.0.0.42/measurements\n[INFO] [sink:postgresql://pgwatch@10.0.0.42/measurements] Initialising measurements database...\n[INFO] [sink:postgresql://pgwatch@10.0.0.42/measurements] Measurements sink activated\n...\n
That's it! You have successfully bootstrapped the metrics measurements database for pgwatch. You can now start collecting metrics from your sources and storing them in the database.
If now you want to see the tables created by pgwatch in the measurements
database, you can connect to the database using the psql
command-line tool and list the tables.
$ psql -U pgwatch -h 10.0.0.42 -p 5432 -d measurements\npsql (17.2)\n\nmeasurements=> \\dn\n List of schemas\n Name | Owner\n---------------+---------\n admin | pgwatch\n subpartitions | pgwatch\n(3 rows)\n
You can see that pgwatch has created the admin
and subpartitions
schemas in the measurements
database. These schemas contain the tables and indexes used by pgwatch to store metrics measurements. You may examine these schemas to understand how pgwatch stores metrics measurements in the database.
Tip
You can also add --log-level=debug
command-line parameter to see every SQL query executed by pgwatch. This can be useful for debugging purposes. But remember that this will log a lot of information, so it is wise to use it with empty sources this time, meaning there are no databases to monitor yet.
Min 1GB of RAM is required for a Docker setup using Postgres to store metrics.
The gatherer alone needs typically less than 50 MB if the metric measurements are stored online. Memory consumption will increase a lot when the metrics store is offline though, as then metrics are cached in RAM in ring buffer style up to a limit of 10k data points (for all databases) and then memory consumption is dependent on how \"wide\" are the metrics gathered.
Storage requirements vary a lot and are hard to predict.
10GB of disk space should be enough though for monitoring a single DB with \"exhaustive\" preset for 1 month with Postgres storage. 2 weeks is also the default metrics retention policy for Postgres running in Docker (configurable). Depending on the amount of schema objects - tables, indexes, stored procedures and especially on number of unique SQLs, it could be also much more. If disk size reduction is wanted for PostgreSQL storage then best would be to use the TimescaleDB extension - it has built-in compression and its disk footprint is about 5 times smaller than vanilla Postgres, while retaining full SQL support.
A low-spec (1 vCPU, 2 GB RAM) cloud machine can easily monitor 100 DBs in \"exhaustive\" settings (i.e. almost all metrics are monitored in 1-2min intervals) without breaking a sweat (\\<20% load).
A single Postgres node should handle thousands of requests per second.
When high metrics write latency is problematic (e.g. using a DBaaS across the Atlantic) then increasing the default maximum batching delay of 250ms usually gives good results. Relevant params: --batching-delay-ms / PW_BATCHING_MAX_DELAY_MS
.
Note that when monitoring a very large number of databases, it's possible to \"shard\" / distribute them between many metric collection instances running on different hosts, via the group
attribute. This requires that some hosts have been assigned a non-default group identifier, which is just a text field exactly for this sharding purpose. Relevant params: --group / PW_GROUP
.
Although all cloud service providers offer some kind of built-in instrumentation and graphs, they're mostly rather conservative in this area so as not to consume extra server resources and not to overflow and confuse beginners with too much information. So for advanced troubleshooting it might make sense to gather some additional metrics on your own, especially given that you can also easily add custom business metrics to pgwatch using plain SQL, for example to track the amount of incoming sales orders. Also with pgwatch / Grafana you have more freedom on the visual representation side and access to around 30 prebuilt dashboards and a lot of freedom creating custom alerting rules.
The common denominator for all managed cloud services is that they remove / disallow dangerous or potentially dangerous functionalities like file system access and untrusted PL-languages like Python - so you'll lose a small amount of metrics and \"helper functions\" compared to a standard on-site setup described in the previous chapter <preparing_databases>
. This also means that you will get some errors displayed on some preset dashboards like \"DB overview\" and thus will be better off using a dashboard called \"DB overview Unprivileged\" tailored specially for such a use case.
pgwatch has been tested to work with the following managed database services:
"},{"location":"howto/using_managed_services.html#google-cloud-sql-for-postgresql","title":"Google Cloud SQL for PostgreSQL","text":"pg_monitor
system role available.gce
.To get the most out of pgwatch on GCE you need some additional clicks in the GUI / Cloud Console \"Flags\" section to enable some common PostgreSQL monitoring parameters like track_io_timing
and track_functions
.
No Python / OS helpers possible. OS metrics can be integrated in Grafana though using the CloudWatch data source
pg_monitor
system role available.
pgwatch default preset names: rds
, aurora
Documentation:
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.AuroraPostgreSQL.html
Note that the AWS Aurora PostgreSQL-compatible engine is missing some additional metrics compared to normal RDS.
"},{"location":"howto/using_managed_services.html#azure-database-for-postgresql","title":"Azure Database for PostgreSQL","text":"pg_monitor
system role available.azure
Surprisingly on Azure some file access functions are whitelisted, thus one can for example use the wal_size
metrics.
Note
Azure has pg_stat_statements not fully activated by default, so you need to enable it manually or via the API. Documentation link here.
"},{"location":"howto/using_managed_services.html#aiven-for-postgresql","title":"Aiven for PostgreSQL","text":"The Aiven developer documentation contains information on how to monitor PostgreSQL instances running on the Aiven platform with pgwatch.
"},{"location":"intro/features.html","title":"List of main features","text":"The pgwatch project got started back in 2016 by Kaarel Moppel and released in 2017 initially for internal monitoring needs at Cybertec as all the Open Source PostgreSQL monitoring tools at the time had various limitations like being too slow and invasive to set up or providing a fixed set of visuals and metrics.
For more background on the project motivations and design goals see the original series of blogposts announcing the project and the following feature updates released approximately twice per year.
Cybertec also provides commercial 9-to-5 and 24/7 support for pgwatch.
For feature requests or troubleshooting assistance please open an issue on project's Github page.
"},{"location":"reference/advanced_features.html","title":"Advanced features","text":"Over the years the core functionality of fetching metrics from a set of plain Postgres DB-s has been extended in many ways to cover some common problem areas like server log monitoring and supporting monitoring of some other popular tools often used together with Postgres, like the PgBouncer connection pooler for example.
"},{"location":"reference/advanced_features.html#patroni-support","title":"Patroni support","text":"Patroni is a popular Postgres specific HA-cluster manager that makes node management simpler than ever, meaning that everything is dynamic though - cluster members can come and go, making monitoring in the standard way a bit tricky. But luckily Patroni cluster members information is stored in a DCS (Distributed Consensus Store), like etcd, so it can be fetched from there periodically.
When 'patroni' is selected as a source type then the usual Postgres host/port fields should be left empty (\"dbname\" can still be filled if only a specific single database is to be monitored) and instead \"Host config\" JSON field should be filled with DCS address, type and scope (cluster name) information. A sample config (for Config DB based setups) looks like:
{\n \"dcs_type\": \"etcd\",\n \"dcs_endpoints\": [\"http://127.0.0.1:2379\"],\n \"scope\": \"batman\",\n \"namespace\": \"/service/\"\n }\n
For YAML based setups an example can be found from the instances.yaml file.
If Patroni is powered by etcd, then also username, password, ca_file, cert_file, key_file optional security parameters can be defined - other DCS systems are currently only supported without authentication.
Also, if you don't use the standby nodes actively for queries then it might make sense to decrease the volume of gathered metrics and to disable the monitoring of such nodes with the \"Master mode only?\" checkbox (when using the Web UI) or with only_if_master=true if using a YAML based setup.
"},{"location":"reference/advanced_features.html#log-parsing","title":"Log parsing","text":"As of v1.7.0 the metrics collector daemon, when running on a DB server (controlled best over a YAML config), has capabilities to parse the database server logs for errors. Out-of-the-box it will though only work when logs are written in CSVLOG format. For other formats user needs to specify a regex that parses out named groups of following fields: database_name, error_severity. See here for an example regex.
Note that only the event counts are stored, no error texts, usernames or other infos! Errors are grouped by severity for the monitored DB and for the whole instance. The metric name to enable log parsing is \"server_log_event_counts\". Also note that for auto-detection of log destination / setting to work, the monitoring user needs superuser / pg_monitor privileges - if this is not possible then log settings need to be specified manually under \"Host config\" as seen for example here.
Sample configuration if not using CSVLOG logging:
On Postgres side (on the monitored DB)
# Debian / Ubuntu default log_line_prefix actually\n log_line_prefix = '%m [%p] %q%u@%d '\n
YAML config (recommended when \"pushing\" metrics from DB nodes to a central metrics DB) ## logs_glob_path is only needed if the monitoring user cannot auto-detect it (i.e. not a superuser / pg_monitor role)\n # logs_glob_path:\n logs_match_regex: '^(?P<log_time>.*) \[(?P<process_id>\d+)\] (?P<user_name>.*)@(?P<database_name>.*?) (?P<error_severity>.*?): '\n
For log parsing to work the metric server_log_event_counts needs to be enabled or a preset config including it used - like the \"full\" preset."},{"location":"reference/advanced_features.html#pgbouncer-support","title":"PgBouncer support","text":"pgwatch also supports collecting internal statistics from the PgBouncer connection pooler, via the built-in special \"pgbouncer\" database and the SHOW STATS
command. To enable it choose the according DB Type, provide connection info to the pooler port and make sure the pgbouncer_stats metric or \"pgbouncer\" preset config is selected for the host. Note that for the \"DB Name\" field you should insert not \"pgbouncer\" (although this special DB provides all the statistics) but the real name of the pool you wish to monitor or leave it empty to track all pools. In latter case individual pools will be identified / separated via the \"database\" tag.
There's also a built-in Grafana dashboard for PgBouncer data, looking like that:
"},{"location":"reference/advanced_features.html#pgpool-ii-support","title":"Pgpool-II support","text":"Quite similar to PgBouncer, also Pgpool offers some statistics on pool performance and status, which might be of interest especially if using the load balancing features. To enable it choose the according DB Type, provide connection info to the pooler port and make sure the pgpool_stats metric / preset config is selected for the host.
The built-in Grafana dashboard for Pgpool data looks something like that:
"},{"location":"reference/advanced_features.html#prometheus-scraping","title":"Prometheus scraping","text":"pgwatch was originally designed with direct metrics storage in mind, but later also support for externally controlled Prometheus scraping was added.
To enable the scraping endpoint, add this commandline parameter: --sink=prometheus://<host>:<port>/<namespace>
. If you omit host (Ex: --sink=prometheus://:8080
), server listens on all interfaces and supplied port. If you omit namespace, default is pgwatch
.
Additionally, note that you still need to specify some metrics config as usual - only metrics with interval values bigger than zero will be populated on scraping.
Currently, a few built-in metrics that require some state to be stored between scrapes, e.g. the \"change_events\" metric, will be ignored. Also, non-numeric data columns will be ignored! Tag columns will be preserved though as Prometheus \"labels\".
"},{"location":"reference/advanced_features.html#cloud-providers-support","title":"Cloud providers support","text":"Due to popularity of various managed PostgreSQL offerings there's also support for some managed options in sense of Preset Configs, that take into account the fact that on such platforms you get a limited user that doesn't have access to all metrics or some features have just been plain removed. Thus, to reduce server log errors and save time on experimenting there are following presets available:
Some variables influence multiple components. Command line parameters override env. variables (when doing custom deployments).
"},{"location":"reference/env_variables.html#docker-image-specific","title":"Docker image specific","text":"See pgwatch --help
output for details.
Basic Helm chart templates for installing pgwatch to a Kubernetes cluster are available as a standalone repository.
Notice
Charts are not considered as a part of pgwatch and are not maintained by pgwatch developers.
The corresponding setup can be found in repository, whereas installation is done via the following commands:
cd openshift_k8s\nhelm install -f chart-values.yml pgwatch ./helm-chart\n
Please have a look at helm-chart/values.yaml
to get additional information of configurable options.
Metrics are named SQL queries that return a timestamp and pretty much anything else you find useful. Most metrics have many different query text versions for different target PostgreSQL versions, also optionally taking into account primary / replica state and as of v1.8 also versions of installed extensions.
-- a sample metric\nSELECT\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int;\n
Correct version of the metric definition will be chosen automatically by regularly connecting to the target database and checking the Postgres version, recovery state, and if the monitoring user is a superuser or not.
"},{"location":"reference/metric_definitions.html#built-in-metrics-and-presets","title":"Built-in metrics and presets","text":"There's a good set of pre-defined metrics & metric configs provided by the pgwatch project to cover all typical needs, but when monitoring hundreds of hosts you'd typically want to develop some custom Preset Configs or at least adjust the metric fetching intervals according to your monitoring goals.
Some things to note about the built-in metrics:
The \"change_events\" built-in metric, tracking DDL & config changes, uses internally some other \"*_hashes\" metrics which are not meant to be used on their own. Such metrics are described also accordingly on the Web UI /metrics page, and they should not be removed.
"},{"location":"reference/metric_definitions.html#recommendations","title":"recommendations","text":"When enabled (i.e. interval > 0
), this metric will find all other metrics starting with reco_*
and execute those queries. The purpose of the metric is to spot some performance, security and other \"best practices\" violations. Users can add new reco_*
queries freely.
This enables Postgres server log \"tailing\" for errors. Can't be used for \"pull\" setups though unless the DB logs are somehow mounted / copied over, as real file access is needed. See the Log parsing chapter for details.
"},{"location":"reference/metric_definitions.html#instance_up","title":"instance_up","text":"For normal metrics there will be no data rows stored if the DB is not reachable, but for this one there will be a 0 stored for the \"is_up\" column that under normal operations would always be 1. This metric can be used to calculate some \"uptime\" SLA indicator for example.
"},{"location":"reference/metric_definitions.html#archiver","title":"archiver","text":"This metric retrieves key statistics from the PostgreSQL pg_stat_archiver
view, providing insights into the status of WAL file archiving. It returns the total number of successfully archived files and failed archiving attempts. Additionally, it identifies if the most recent attempt resulted in a failure and calculates how many seconds have passed since the last failure. The metric only considers data if WAL archiving is enabled in the system, helping administrators monitor and diagnose issues related to the archiving process.
This metric gathers detailed information from the PostgreSQL pg_stat_activity
view, providing an overview of the current session and activity state for the database. It tracks the total number of client backends, active sessions, idle sessions, sessions waiting on locks, and background workers. The metric also calculates statistics on blocked sessions, longest waiting times, average and longest session durations, transaction times, and query durations. Additionally, it monitors autovacuum worker activity and provides the age of the oldest transaction (measured by xmin
). This metric helps administrators monitor session states, detect bottlenecks, and ensure the system is within its connection limits, providing visibility into database performance and contention.
This metric retrieves statistics from the pg_stat_bgwriter
view, providing information about the background writer process in PostgreSQL. It reports the number of buffers that have been cleaned (written to disk) by the background writer, how many times buffers were written because the background writer reached the maximum limit (maxwritten_clean
), and the total number of buffers allocated. Additionally, it calculates the time in seconds since the last reset of these statistics. This metric helps monitor the efficiency and behavior of PostgreSQL's background writer, which plays a crucial role in managing I/O by writing modified buffers to disk, thus helping to ensure smooth database performance.
This metric provides information about lock contention in PostgreSQL by identifying sessions that are waiting for locks and the sessions holding those locks. It captures details from the pg_locks
view and the pg_stat_activity
view to highlight the interactions between the waiting and blocking sessions. The result helps identify which queries are causing delays due to lock contention, the type of locks involved, and the users or sessions responsible for holding or waiting on locks. This metric is useful for diagnosing performance bottlenecks related to database locking.
This metric provides insights into the activity and performance of PostgreSQL's checkpointer process, which ensures that modified data pages are regularly written to disk to maintain consistency. It tracks the number of checkpoints that have been triggered either by the system's timing or by specific requests, as well as how many restart points have been completed in standby environments. Additionally, it measures the time spent writing and synchronizing buffers to disk, the total number of buffers written, and how long it has been since the last reset of these statistics. This metric helps administrators understand how efficiently the system is handling checkpoints and whether there might be I/O performance issues related to the frequency or duration of checkpoint operations.
"},{"location":"reference/metric_definitions.html#db_stats","title":"db_stats","text":"This metric provides a comprehensive overview of various performance and health statistics for the current PostgreSQL database. It tracks key metrics such as the number of active database connections (numbackends
), transaction statistics (committed, rolled back), block I/O (blocks read and hit in the cache), and tuple operations (rows returned, fetched, inserted, updated, deleted). Additionally, it monitors conflicts, temporary file usage, deadlocks, and block read/write times.
The metric also includes system uptime by calculating how long the PostgreSQL postmaster
process has been running and tracks checksum failures and the time since the last checksum failure. It identifies if the database is in recovery mode, retrieves the system identifier, and tracks session-related statistics such as total session time, active time, idle-in-transaction time, and sessions that were abandoned, fatal, or killed.
Lastly, it monitors the number of invalid indexes that are not currently being rebuilt. This metric helps database administrators gain insights into overall database performance, transaction behavior, session activity, and potential index-related issues, which are critical for efficient database management and troubleshooting.
"},{"location":"reference/metric_definitions.html#wal","title":"wal","text":"This metric tracks key information about the PostgreSQL system's write-ahead logging (WAL) and recovery state. It calculates the current WAL location, showing how far the system has progressed in terms of WAL writing or replaying if in recovery mode. The metric also indicates whether the database is in recovery, monitors the system's uptime since the postmaster
process started, and provides the system's unique identifier. Additionally, it retrieves the current timeline, which is essential for tracking the state of the WAL log and recovery process. This metric helps administrators monitor database health, especially in terms of recovery and WAL operations.
This metric identifies lock contention in the PostgreSQL database by tracking sessions that are waiting for locks and the corresponding sessions holding those locks. It examines active queries in the current database and captures detailed information about both the waiting and blocking sessions. For each waiting session, it records the lock type, user, lock mode, and the query being executed, as well as the table involved. Similarly, for the session holding the lock, it captures the same details. This helps database administrators identify queries that are causing delays due to lock contention, enabling them to troubleshoot performance issues and optimize query execution.
"},{"location":"reference/metric_definitions.html#kpi","title":"kpi","text":"This metric provides a detailed overview of PostgreSQL database performance and activity. It tracks the current WAL (Write-Ahead Log) location, the number of active and blocked backends, and the oldest transaction time. It calculates the total transaction rate (TPS) by summing committed and rolled-back transactions, as well as specific statistics on table and index performance, such as the number of sequential scans on tables larger than 10MB and the number of function calls.
Additionally, the metric tracks block read and write times, the amount of temporary bytes used, deadlocks, and whether the database is in recovery mode. Finally, it calculates the uptime of the PostgreSQL postmaster
process. This information helps administrators monitor and manage system performance, detect potential bottlenecks, and optimize query and transaction behavior.
This metric provides detailed statistics about the performance and resource usage of SQL queries executed on the PostgreSQL database. It collects data from the pg_stat_statements
view, focusing on queries that have been executed more than five times and have significant execution time (greater than 5 milliseconds). It aggregates important performance metrics for each query, such as:
calls
), total execution time, and total planning time.The metric ranks queries based on different performance factors, including execution time, number of calls, block reads/writes, and temporary block usage, and it limits the results to the top 100 queries in each category. This helps administrators identify resource-intensive queries, optimize database performance, and improve query efficiency by focusing on those that consume the most I/O or take the longest to execute.
"},{"location":"reference/metric_definitions.html#table_stats","title":"table_stats","text":"This metric collects and summarizes detailed information about table sizes, table activity, and maintenance operations in PostgreSQL. It tracks both individual tables and partitioned tables, including their root partitions. The metric calculates the size of each table (in bytes), as well as other key statistics like sequential scans, index scans, tuples inserted, updated, or deleted, and the number of live and dead tuples. It also tracks maintenance operations like vacuum and analyze runs, as well as whether autovacuum is disabled for specific tables.
For partitioned tables, the metric aggregates the statistics across all partitions and provides a summary of the partitioned table as a whole, marking it as the root partition. Additionally, it calculates the time since the last vacuum and analyze operations and captures transaction freeze age for each table, which helps monitor when a table might need a vacuum to prevent transaction wraparound.
By focusing on tables larger than 10MB and ignoring temporary and system tables, this metric helps database administrators monitor the largest and most active tables in their database, ensuring that maintenance operations like vacuum and analyze are running effectively and identifying tables that may be contributing to performance bottlenecks due to size or activity.
"},{"location":"reference/metric_definitions.html#custom-metrics","title":"Custom metrics","text":"For defining metrics definitions you should adhere to a couple of basic concepts:
Every metric query should have an epoch_ns
(nanoseconds since epoch column) to record the metrics reading time. If the column is not there, things will still work but the server timestamp of the metrics gathering daemon will be used, so a small loss of precision (assuming intra-datacenter monitoring with little lag) occurs.
Queries should only return text, integer, boolean or floating point (a.k.a. double precision) Postgres data types. Note that columns with NULL values are not stored at all in the data layer as it's a bit bothersome to work with NULLs!
Column names should be descriptive enough so that they're self-explanatory, but not too long as it costs storage
Metric queries should execute fast - at least below the selected Statement timeout (default 5s)
Columns can be optionally \"tagged\" by prefixing them with tag_
. By doing this, the column data will be indexed by Postgres, giving the following advantages:
All fetched metric rows can also be \"prettyfied\" with any custom static key-value data, per host. To enable use the \"Custom tags\" Web UI field for the monitored DB entry or \"custom_tags\" YAML field. Note that this works per host and applies to all metrics.
For Prometheus the numerical columns are by default mapped to a Value Type of \"Counter\" (as most Statistics Collector columns are cumulative), but when this is not the case and the column is a \"Gauge\" then according column attributes should be declared. See below section on column attributes for details.
For Prometheus all text fields will be turned into tags / labels as only floats can be stored!
/etc/pgwatch/metrics
. The folder name will be the metrics name, so choose wisely.Create a new subfolder for each \"minimally supported\" Postgres version and insert the metrics SQL definition into a file named \"metric.sql\".
Notice
Note the \"minimally supported\" part - i.e. if your query will work from version v11.0 to v17 then you only need one entry called \"11\". If there was a breaking change in the internal catalogs at v13 so that the query stopped working, you need a new entry named \"13\" that will be used for v13 and all later versions.
Activate the newly added metric by including it in some existing Preset Config or add it directly to the YAML config \"custom_metrics\" section.
The behaviour of plain metrics can be extended with a set of attributes that will modify the gathering in some way. The attributes are stored in YAML files called metric_attrs.yaml in a metrics root directory or in the metric_attribute
Config DB table.
Currently supported attributes are:
is_instance_level
Enables caching, i.e. sharing of metric data between various databases of a single instance to reduce load on the monitored server.
wal:\n sqls:\n 11: |\n select /* pgwatch_generated */\n ...\n gauges:\n - '*'\n is_instance_level: true\n
statement_timeout_seconds
Enables to override the default 'per monitored DB' statement timeouts on metric level.
metric_storage_name
Enables dynamic \"renaming\" of metrics at storage level, i.e. declaring almost similar metrics with different names but the data will be stored under one metric. Currently used (for out-of-the box metrics) only for the stat_statements_no_query_text
metric, to not store actual query texts from the \"pg_stat_statements\" extension for more security sensitive instances.
extension_version_based_overrides
Enables to \"switch out\" the query text from some other metric based on some specific extension version. See 'reco_add_index' for an example definition.
disabled_days
Enables to \"pause\" metric gathering on specified days. See metric_attrs.yaml
for \"wal\" for an example.
disabled_times
Enables to \"pause\" metric gathering on specified time intervals. e.g. \"09:00-17:00\" for business hours. Note that if time zone is not specified the server time of the gather daemon is used. disabled_days / disabled_times can also be defined both on metric and host (host_attrs) level.
Besides the _tag column prefix modifier, it's also possible to modify the output of certain columns via a few attributes. It's only relevant for Prometheus output though currently, to set the correct data types in the output description, which is generally considered a nice-to-have thing anyway. For YAML based setups this means adding a \"column_attrs.yaml\" file in the metrics top folder and for Config DB based setups an according \"column_attrs\" JSON column should be filled via the Web UI.
Supported column attributes:
gauges
Describe the mentioned output columns as of TYPE gauge, i.e. the value can change any time in any direction. Default TYPE for pgwatch is counter.
table_stats_approx:\n sqls:\n 11: |\n ...\n gauges:\n - table_size_b\n - total_relation_size_b\n - toast_size_b\n - seconds_since_last_vacuum\n - seconds_since_last_analyze\n - n_live_tup\n - n_dead_tup\n metric_storage_name: table_stats\n
As mentioned in Helper Functions section, Postgres knows very little about the Operating System that it's running on, so in some (most) cases it might be advantageous to also monitor some basic OS statistics together with the PostgreSQL ones, to get a better head start when troubleshooting performance problems. But as setup of such OS tools and linking the gathered data is not always trivial, pgwatch has a system of helpers for fetching such data.
One can invent and install such helpers on the monitored databases freely to expose any information needed (backup status etc.) via Python, or any other PL-language supported by Postgres, and then add these metrics similarly to any other Postgres-native metrics.
"},{"location":"reference/security.html","title":"Security aspects","text":""},{"location":"reference/security.html#general-security-information","title":"General security information","text":"Security can be tightened for most pgwatch components quite granularly, but the default values for the Docker image don't focus on security though but rather on being quickly usable for ad-hoc performance troubleshooting, which is where the roots of pgwatch lie.
Some points on security:
The administrative Web UI doesn't have by default any security. Configurable via env. variables.
Viewing Grafana dashboards by default doesn't require login. Editing needs a password. Configurable via env. variables.
Dashboards based on the \"stat_statements\" metric (Stat Statement Overview / Top) expose actual queries.
They should be \"mostly\" stripped of details though and replaced by placeholders by Postgres, but if no risks can be taken such dashboards (or at least according panels) should be deleted. Or as an alternative the stat_statements_no_query_text
and pg_stat_statements_calls
metrics could be used, which don't store query texts in the first place.
Safe certificate connections to Postgres are supported. According sslmode (verify-ca, verify-full) and cert file paths need to be specified then in connection string on Web UI \"/dbs\" page or in the YAML config.
Note that although pgwatch can handle password security, in many cases it's better to still use the standard LibPQ .pgpass file to store passwords.
Some common sense security is built into default Docker images for all components but not activated by default. A sample command to launch pgwatch with following security \"checkpoints\" enabled:
Password encryption for connect strings stored in the Config DB
docker run --name pw3 -d --restart=unless-stopped \\\n -p 3000:3000 -p 8080:8080 \\\n -e PW_GRAFANASSL=1 -e PW_WEBSSL=1 \\\n -e PW_GRAFANANOANONYMOUS=1 -e PW_GRAFANAUSER=myuser \\\n -e PW_GRAFANAPASSWORD=mypass \\\n -e PW_WEBNOANONYMOUS=1 -e PW_WEBNOCOMPONENTLOGS=1 \\\n -e PW_WEBUSER=myuser -e PW_WEBPASSWORD=mypass \\\n -e PW_AES_GCM_KEYPHRASE=qwerty \\\n cybertec/pgwatch\n
For custom installs it's up to the user though. A hint - Docker launcher files can also be inspected to see which config parameters are being touched.
"},{"location":"reference/technical_details.html","title":"Technical details","text":"Here are some technical details that might be interesting for those who are planning to use pgwatch for critical monitoring tasks or customize it in some way.
Dynamic management of monitored databases, metrics and their intervals - no need to restart/redeploy
Config DB or YAML / SQL files are scanned every 2 minutes (by default, changeable via --servers-refresh-loop-seconds
) and changes are applied dynamically. As common connectivity errors also handled, there should be no need to restart the gatherer \"for fun\". Please always report issues which require restarting.
There are some safety features built-in so that monitoring would not obstruct actual operation of databases
-e PW_WEBSSL=1 -e PW_GRAFANASSL=1
when launching Docker)Instance-level metrics caching
To further reduce load on multi-DB instances, pgwatch can cache the output of metrics that are marked to gather only instance-level data. One such metric is for example \"wal\", and the metric attribute is \"is_instance_level\". Caching will be activated only for continuous source types, and to a default limit of up to 30 seconds (changeable via the --instance-level-cache-max-seconds
param).
As described in the Components chapter, there is a couple of ways how to set up pgwatch. Two most common ways though are the central Config DB based \"pull\" approach and the YAML file based \"push\" approach, plus Grafana to visualize the gathered metrics.
"},{"location":"tutorial/custom_installation.html#config-db-based-setup","title":"Config DB based setup","text":""},{"location":"tutorial/custom_installation.html#overview-of-installation-steps","title":"Overview of installation steps","text":"Below are the sample steps for a custom installation from scratch using Postgres for the pgwatch configuration DB, metrics DB and Grafana config DB.
All examples here assume Ubuntu as OS - but it's basically the same for the RedHat family of operating systems also, minus package installation syntax differences.
Install Postgres
Follow the standard Postgres install procedure basically. Use the latest major version available, but minimally v11+ is recommended for the metrics DB due to recent partitioning speedup improvements and also older versions were missing some default JSONB casts so that a few built-in Grafana dashboards need adjusting otherwise.
To get the latest Postgres versions, official Postgres PGDG repos are to be preferred over default distro repos. Follow the instructions from:
Install pgwatch - either from pre-built packages or by compiling the Go code
Using pre-built packages
The pre-built DEB / RPM / Tar packages are available on the GitHub releases page.
# find out the latest package link and replace below, using v1.8.0 here\nwget https://github.com/cybertec-postgresql/pgwatch/releases/download/v1.8.0/pgwatch_v1.8.0-SNAPSHOT-064fdaf_linux_64-bit.deb\nsudo dpkg -i pgwatch_v1.8.0-SNAPSHOT-064fdaf_linux_64-bit.deb\n
Compiling the Go code yourself
This method of course is not needed unless dealing with maximum security environments or some slight code changes are required.
Install Go by following the official instructions
Get the pgwatch project's code and compile the gatherer daemon
git clone https://github.com/cybertec-postgresql/pgwatch.git\ncd pgwatch/internal/webui\nyarn install --network-timeout 100000 && yarn build\ncd ..\ngo build\n
After fetching all the Go library dependencies (can take minutes) an executable named \"pgwatch\" should be generated. Additionally, it's a good idea to copy it to /usr/bin/pgwatch
.
Configure a SystemD auto-start service (optional)
Sample startup scripts can be found at /etc/pgwatch/startup-scripts/pgwatch.service or online here. Note that they are OS-agnostic and might need some light adjustment of paths, etc. - so always test them out.
Bootstrap the config DB
Create a user to \"own\" the pgwatch
schema
Typically called pgwatch
but can be anything really, if the schema creation file is adjusted accordingly.
psql -c \"create user pgwatch password 'xyz'\"\npsql -c \"create database pgwatch owner pgwatch\"\n
Roll out the pgwatch config schema
The schema will most importantly hold connection strings of DBs to be monitored and the metric definitions.
# FYI - one could get the below schema files also directly from GitHub\n# if re-using some existing remote Postgres instance where pgwatch was not installed\npsql -f /etc/pgwatch/sql/config_store/config_store.sql pgwatch\npsql -f /etc/pgwatch/sql/config_store/metric_definitions.sql pgwatch\n
Bootstrap the measurements storage DB
Create a dedicated database for storing metrics and a user to \"own\" the metrics schema
Here again default scripts expect a role named pgwatch
but can be anything if to adjust the scripts.
psql -c \"create database pgwatch_metrics owner pgwatch\"\n
Roll out the pgwatch metrics storage schema
This is a place to pause and first think how many databases will be monitored, i.e. how much data generated, and based on that one should choose a suitable metrics storage schema. There are a couple of different options available that are described here in detail, but the gist of it is that you don't want partitioning schemes too complex if you don't have zounds of data and don't need the fastest queries. For a smaller amount of monitored DBs (a couple dozen to a hundred) the default \"metric-time\" is a good choice. For hundreds of databases, aggressive intervals, or long term storage usage of the TimescaleDB extension is recommended.
cd /etc/pgwatch/sql/metric_store\npsql -f roll_out_metric_time.psql pgwatch_metrics\n
Note
Default retention for Postgres storage is 2 weeks! To change, use the --pg-retention-days / PW_PG_RETENTION_DAYS
gatherer parameter.
Prepare the \"to-be-monitored\" databases for metrics collection
As a minimum we need a plain unprivileged login user. Better though is to grant the user also the pg_monitor
system role, available on v10+. Superuser privileges should be normally avoided for obvious reasons of course, but for initial testing in safe environments it can make the initial preparation (automatic helper rollouts) a bit easier still, given superuser privileges are later stripped.
To get most out of your metrics some SECURITY DEFINER
wrappers functions called \"helpers\" are recommended on the DB-s under monitoring. See the detailed chapter on the \"preparation\" topic here for more details.
Configure DB-s and metrics / intervals to be monitored
pgwatch.monitored_db
tableStart the pgwatch metrics collection agent
The gatherer has quite some parameters (use the --help
flag to show them all), but simplest form would be:
pgwatch-daemon \\\n --host=localhost --user=pgwatch --dbname=pgwatch \\\n --datastore=postgres --pg-metric-store-conn-str=postgresql://pgwatch@localhost:5432/pgwatch_metrics \\\n --verbose=info\n
Default connections params expect a trusted localhost Config DB setup so mostly the 2nd line is not needed, actually.
Or via SystemD if set up in previous steps
useradd -m -s /bin/bash pgwatch # default SystemD templates run under the pgwatch user\nsudo systemctl start pgwatch\nsudo systemctl status pgwatch\n
After initial verification that all works it's usually good idea to set verbosity back to default by removing the verbose flag.
Another tip to configure connection strings inside SystemD service files is to use the \"systemd-escape\" utility to escape special characters like spaces etc. if using the LibPQ connect string syntax rather than JDBC syntax.
Monitor the console or log output for any problems
If you see metrics trickling into the \"pgwatch_metrics\" database (metric names are mapped to table names and tables are auto-created), then congratulations - the deployment is working! When using some more aggressive preset metrics config then there are usually still some errors though, due to the fact that some more extensions or privileges are missing on the monitored database side. See the according chapter here.
Info
When you're compiling your own gatherer then the executable file will be named just pgwatch
instead of pgwatch-daemon
to avoid mixups.
Install Grafana
Create a Postgres database to hold Grafana internal config, like dashboards etc.
Theoretically it's not absolutely required to use Postgres for storing Grafana internal settings / dashboards, but doing so has 2 advantages - you can easily roll out all pgwatch built-in dashboards and one can also do remote backups of the Grafana configuration easily.
psql -c \"create user pgwatch_grafana password 'xyz'\"\npsql -c \"create database pgwatch_grafana owner pgwatch_grafana\"\n
Follow the instructions from https://grafana.com/docs/grafana/latest/installation/debian/, basically something like:
wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -\necho \"deb https://packages.grafana.com/oss/deb stable main\" | sudo tee -a /etc/apt/sources.list.d/grafana.list\nsudo apt-get update && sudo apt-get install grafana\n\n# review / change config settings and security, etc\nsudo vi /etc/grafana/grafana.ini\n\n# start and enable auto-start on boot\nsudo systemctl daemon-reload\nsudo systemctl start grafana-server\nsudo systemctl status grafana-server\n
Default Grafana port: 3000
Configure Grafana config to use our pgwatch_grafana
DB
Place something like below in the [database]
section of /etc/grafana/grafana.ini
[database]\ntype = postgres\nhost = my-postgres-db:5432\nname = pgwatch_grafana\nuser = pgwatch_grafana\npassword = xyz\n
Taking a look at [server], [security]
and [auth*]
sections is also recommended.
Set up the pgwatch
metrics database as the default datasource
We need to tell Grafana where our metrics data is located. Add a datasource via the Grafana UI (Admin -> Data sources) or adjust and execute the \"pgwatch/bootstrap/grafana_datasource.sql\" script on the pgwatch_grafana
DB.
Add pgwatch predefined dashboards to Grafana
This could be done by importing the pgwatch dashboard definition JSONs manually, one by one, from the \"grafana\" folder (\"Import Dashboard\" from the Grafana top menu) or via a small helper script located at /etc/pgwatch/grafana-dashboards/import_all.sh. The script needs some adjustment for metrics storage type, connect data and file paths.
Optionally install also Grafana plugins
Currently, one pre-configured dashboard (Biggest relations treemap) uses an extra plugin - if planning to use that dash, then run the following:
grafana-cli plugins install savantly-heatmap-panel\n
Start discovering the preset dashboards
If the previous step of launching pgwatch daemon succeeded, and it was more than some minutes ago, one should already see some graphs on dashboards like \"DB overview\" or \"DB overview Unprivileged / Developer mode\" for example.
From v1.4 one can also deploy the pgwatch gatherer daemons more easily in a de-centralized way, by specifying monitoring configuration via YAML files. In that case there is no need for a central Postgres \"config DB\".
YAML installation steps
Relevant gatherer parameters / env. vars: --config / PW_CONFIG
and --metrics-folder / PW_METRICS_FOLDER
.
For details on individual steps like installing pgwatch see the above paragraph.
"},{"location":"tutorial/docker_installation.html","title":"Installing using Docker","text":""},{"location":"tutorial/docker_installation.html#simple-setup-steps","title":"Simple setup steps","text":"The simplest real-life pgwatch setup should look something like that:
Decide which metrics storage engine you want to use - cybertecpostgresql/pgwatch-demo uses PostgreSQL. When only Prometheus sink is used (exposing a port for remote scraping), one should use the slimmer cybertecpostgresql/pgwatch image which doesn't have any built in databases.
Find the latest pgwatch release version by going to the project's GitHub Releases page or use the public API with something like that:
curl -so- https://api.github.com/repos/cybertec-postgresql/pgwatch/releases/latest | jq .tag_name | grep -oE '[0-9\\.]+'\n
docker pull cybertecpostgresql/pgwatch-demo:X.Y.Z\n
docker run -d --restart=unless-stopped -p 3000:3000 -p 8080:8080 \\\n--name pw3 cybertecpostgresql/pgwatch-demo:X.Y.Z\n
Note that we're setting the container to be automatically restarted\nin case of a reboot/crash - which is highly recommended if not using\nsome container management framework to run pgwatch.\n
"},{"location":"tutorial/docker_installation.html#more-future-proof-setup-steps","title":"More future-proof setup steps","text":"Although the above simple setup example will do for more temporary setups / troubleshooting sessions, for permanent setups it's highly recommended to create separate volumes for all software components in the container, so that it would be easier to update to newer pgwatch Docker images and pull file system based backups, and also it might be a good idea to expose all internal ports at least on localhost for possible troubleshooting and making possible to use native backup tools more conveniently for Postgres.
Note that, for maximum flexibility, security and update simplicity it's best to do a custom setup though - see the next chapter for that.
So in short, for plain Docker setups it would be best to do something like:
# let's create volumes for Postgres, Grafana and pgwatch marker files / SSL certificates\nfor v in pg grafana pw3 ; do docker volume create $v ; done\n\n# launch pgwatch with fully exposed Grafana and Health-check ports\n# and local Postgres and subnet level Web UI ports\ndocker run -d --restart=unless-stopped --name pw3 \\\n -p 3000:3000 -p 8081:8081 -p 127.0.0.1:5432:5432 -p 192.168.1.XYZ:8080:8080 \\\n -v pg:/var/lib/postgresql -v grafana:/var/lib/grafana -v pw3:/pgwatch/persistent-config \\\n cybertecpostgresql/pgwatch-demo:X.Y.Z\n
Note that in non-trusted environments it's a good idea to specify more sensitive ports together with some explicit network interfaces for additional security - by default Docker listens on all network devices!
Also note that one can configure many aspects of the software components running inside the container via ENV - for a complete list of all supported Docker environment variables see the ENV_VARIABLES.md file.
"},{"location":"tutorial/docker_installation.html#available-docker-images","title":"Available Docker images","text":"Following images are regularly pushed to Docker Hub:
cybertecpostgresql/pgwatch-demo
The original pgwatch \u201cbatteries-included\u201d image with PostgreSQL measurements storage. Just insert connect infos to your database via the admin Web UI (or directly into the Config DB) and then turn to the pre-defined Grafana dashboards to analyze DB health and performance.
cybertecpostgresql/pgwatch
A light-weight image containing only the metrics collection daemon / agent, that can be integrated into the monitoring setup over configuration specified either via ENV, mounted YAML files or a PostgreSQL Config DB. See the Component reuse chapter for wiring details.
"},{"location":"tutorial/docker_installation.html#building-custom-docker-images","title":"Building custom Docker images","text":"For custom tweaks, more security, specific component versions, etc. one could easily build the images themselves, just a Docker installation is needed.
"},{"location":"tutorial/docker_installation.html#interacting-with-the-docker-container","title":"Interacting with the Docker container","text":"If launched with the PW_TESTDB=1
env. parameter then the pgwatch configuration database running inside Docker is added to the monitoring, so that you should immediately see some metrics at least on the Health-check dashboard.
To add new databases / instances to monitoring open the administration Web interface on port 8080 (or some other port, if re-mapped at launch) and go to the SOURCES page. Note that the Web UI is an optional component, and one can manage monitoring entries directly in the Postgres Config DB via INSERT
/ UPDATE
into \"pgwatch.monitored_db\"
table. Default user/password are again pgwatch/pgwatchadmin
, database name - pgwatch
. In both cases note that it can take up to 2min (default main loop time, changeable via PW_SERVERS_REFRESH_LOOP_SECONDS
) before you see any metrics for newly inserted databases.
One can edit existing or create new Grafana dashboards, change Grafana global settings, create users, alerts, etc. after logging in as pgwatch/pgwatchadmin
(by default, changeable at launch time).
Metrics and their intervals that are to be gathered can be customized for every database separately via a custom JSON config field or more conveniently by using Preset Configs, like \"minimal\", \"basic\" or \"exhaustive\" (monitored_db.preset_config
table), where the name should already hint at the amount of metrics gathered. For privileged users the \"exhaustive\" preset is a good starting point, and \"unprivileged\" for simple developer accounts.
To add a new metrics yourself (which are simple SQL queries returning any values and a timestamp) head to http://127.0.0.1:8080/metrics. The queries should always include a \"epoch_ns\"
column and \"tag\\_\"
prefix can be used for columns that should be quickly searchable/groupable, and thus will be indexed with the PostgreSQL metric stores. See to the bottom of the \"metrics\" page for more explanations or the documentation chapter on metrics.
For a quickstart on dashboarding, a list of available metrics together with some instructions are presented on the \"Documentation\" dashboard.
Some built-in metrics like \"cpu_load\"
and others, that gather privileged or OS statistics, require installing helper functions (looking like that), so it might be normal to see some blank panels or fetching errors in the logs. On how to prepare databases for monitoring see the Monitoring preparations chapter.
For effective graphing you want to familiarize yourself with the query language of the database system that was selected for metrics storage. Some tips to get going:
max() - min()
aggregates on cumulative counters (most data provided by Postgres is cumulative) would lie.For possible troubleshooting needs, logs of the components running inside Docker are by default (if not disabled on container launch) visible under: http://127.0.0.1:8080/logs/%5Bpgwatch%7Cpostgres%7Cwebui%7Cgrafana. It's of course also possible to log into the container and look at log files directly - they're situated under /var/logs/supervisor/
.
FYI - docker logs ...
command is not really useful after a successful container startup in pgwatch case.
As mentioned in the Components chapter, remember that the pre-built Docker images are just one example how your monitoring setup around the pgwatch metrics collector could be organized. For another example how various components (as Docker images here) can work together, see a Docker Compose example with loosely coupled components here.
"},{"location":"tutorial/docker_installation.html#example-of-advanced-setup-using-yaml-files-and-dual-sinks","title":"Example of advanced setup using YAML files and dual sinks:","text":"pgwatch service in file docker/docker-compose.yml
can look like this:
pgwatch:\n image: cybertecpostgresql/pgwatch:latest\n command:\n - \"--web-disable=true\"\n - \"--sources=/sources.yaml\"\n - \"--sink=postgresql://pgwatch@postgres:5432/pgwatch_metrics\"\n - \"--sink=prometheus://:8080\"\n volumes:\n - \"./sources.yaml:/sources.yaml\"\n ports:\n - \"8080:8080\"\n depends_on:\n postgres:\n condition: service_healthy\n
Source file sources.yaml
in the same directory:
- name: demo\n conn_str: postgresql://pgwatch:pgwatchadmin@postgres/pgwatch\n preset_metrics: exhaustive\n is_enabled: true\n group: default\n
Running this setup you get pgwatch that uses sources from YAML file and outputs measurements to postgres DB and exposes them for Prometheus to scrape on port 8080 instead of WebUI (which is disabled by --web-disable
). Metric definitions are built-in; you can examine them in internal/metrics/metrics.yaml
.
As a base requirement you'll need a login user (non-superuser suggested) for connecting to your server and fetching metrics.
Theoretically you can use any username you like, but if not using \"pgwatch\" you need to adjust the \"helper\" creation SQL scripts (see below for explanation) accordingly, as in those by default the \"pgwatch\" will be granted execute privileges.
CREATE ROLE pgwatch WITH LOGIN PASSWORD 'secret';\n-- For critical databases it might make sense to ensure that the user account\n-- used for monitoring can only open a limited number of connections\n-- (there are according checks in code, but multiple instances might be launched)\nALTER ROLE pgwatch CONNECTION LIMIT 3;\nGRANT pg_monitor TO pgwatch;\nGRANT CONNECT ON DATABASE mydb TO pgwatch;\nGRANT EXECUTE ON FUNCTION pg_stat_file(text) to pgwatch; -- for wal_size metric\n
For most monitored databases it's extremely beneficial (for troubleshooting performance issues) to also activate the pg_stat_statements extension which will give us exact \"per query\" performance aggregates and also enables to calculate how many queries are executed per second for example. In pgwatch context it powers the \"Stat statements Top\" dashboard and many other panels of other dashboards. For additional troubleshooting benefits also the track_io_timing setting should be enabled.
Make sure the Postgres contrib package is installed (should be installed automatically together with the Postgres server package on Debian based systems).
yum install -y postgresqlXY-contrib
apt install postgresql-contrib
Add pg_stat_statements
to your server config (postgresql.conf) and restart the server.
shared_preload_libraries = 'pg_stat_statements'\ntrack_io_timing = on\n
After restarting activate the extension in the monitored DB. Assumes Postgres superuser.
psql -c \"CREATE EXTENSION IF NOT EXISTS pg_stat_statements\"\n
Helper functions in pgwatch context are standard Postgres stored procedures, running under SECURITY DEFINER
privileges. Via such wrapper functions one can do controlled privilege escalation - i.e. to give access to protected Postgres metrics (like active session details, \"per query\" statistics) or even OS-level metrics, to normal unprivileged users, like the pgwatch monitoring role.
If using a superuser login (recommended only for local \"push\" setups) you have full access to all Postgres metrics and would need helpers only for OS remote statistics. For local (push) setups as of pgwatch version 1.8.4 the most typical OS metrics are covered by the --direct-os-stats
flag, explained below.
For unprivileged monitoring users it is highly recommended to take these additional steps on the \"to be monitored\" database to get maximum value out of pgwatch in the long run. Without these additional steps, you will lose about 10-15% of built-in metrics, which might not be too tragic nevertheless. For that use case there's also a preset config named \"unprivileged\".
When monitoring v10+ servers then the built-in pg_monitor system role is recommended for the monitoring user, which almost substitutes superuser privileges for monitoring purposes in a safe way.
"},{"location":"tutorial/preparing_databases.html#rolling-out-common-helpers","title":"Rolling out common helpers","text":"For completely unprivileged monitoring users the following helpers are recommended to make good use of the default \"exhaustive\" Preset Config:
export PGUSER=superuser\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_activity/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_replication/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_wal_size/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_stat_statements/$pgver/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_sequences/$pgver/metric.sql mydb\n
Note that there might not be an exact Postgres version match for helper definitions - then replace \\$pgver with the previous available version number below your server's Postgres version number.
Also note that as of v1.8.1 some helpers definition SQLs scripts (like for \"get_stat_statements\") will inspect also the \"search_path\" and by default will not install into schemas that have PUBLIC CREATE privileges, like the \"public\" schema by default has!
Also when rolling out helpers make sure the search_path
is at defaults or set so that it's also accessible for the monitoring role, as currently neither helpers nor metric definition SQLs assume any particular schema and they depend on the search_path
including everything needed.
For more detailed statistics (OS monitoring, table bloat, WAL size, etc.) it is recommended to install also all other helpers found from the /etc/pgwatch/metrics/00_helpers
folder or do it automatically by using the rollout_helper.py script found in the 00_helpers folder.
As of v1.6.0 though helpers are not needed for Postgres-native metrics (e.g. WAL size) if a privileged user (superuser or pg_monitor GRANT) is used, as pgwatch now supports having 2 SQL definitions for each metric - \"normal / unprivileged\" and \"privileged\" / \"superuser\". In the file system /etc/pgwatch/metrics such \"privileged\" access definitions will have a \"_su\" added to the file name.
"},{"location":"tutorial/preparing_databases.html#automatic-rollout-of-helpers","title":"Automatic rollout of helpers","text":"pgwatch can roll out helpers also automatically on the monitored DB. This requires superuser privileges and a configuration attribute for the monitored DB. In YAML config mode it's called is_superuser, in Config DB md_is_superuser, in the Web UI one can tick the \"Auto-create helpers\" checkbox.
After the automatic rollout it's still generally recommended to remove the superuser privileges from the monitoring role, which now should have GRANTs to all automatically created helper functions. Note though that all created helpers will not be immediately usable as some are for special purposes and need additional dependencies.
A hint: if it can be foreseen that a lot of databases will be created on some instance (generally not a good idea though) it might be a good idea to roll out the helpers directly in the template1 database - so that all newly created databases will get them automatically.
"},{"location":"tutorial/preparing_databases.html#plpython-helpers","title":"PL/Python helpers","text":"PostgreSQL in general is implemented in such a way that it does not know too much about the operating system that it is running on. This is a good thing for portability but can be somewhat limiting for monitoring, especially when there is no system monitoring framework in place or the data is not conveniently accessible together with metrics gathered from Postgres. To overcome this problem, users can also choose to install helpers extracting OS metrics like CPU, RAM usage, etc. so that this data is stored together with Postgres-native metrics for easier graphing / correlation / alerting. This also enables you to be totally independent of any System Monitoring tools like Zabbix, etc., with the downside that everything is gathered over Postgres connections so that when Postgres is down no OS metrics can be gathered either. Since v1.8.4 though the latter problem can be reduced for local \"push\" based setups via the --direct-os-stats
option plus according metrics configuration (e.g. the \"full\" preset).
Note though that PL/Python is usually disabled by DB-as-a-service providers like AWS RDS for security reasons.
# first install the Python bindings for Postgres\napt install postgresql-plpython3-XY\n# yum install postgresqlXY-plpython3\n\npsql -c \"CREATE EXTENSION plpython3u\"\npsql -f /etc/pgwatch/metrics/00_helpers/get_load_average/9.1/metric.sql mydb\n\n# psutil helpers are only needed when full set of common OS metrics is wanted\napt install python3-psutil\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_cpu/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_mem/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_disk/9.1/metric.sql mydb\npsql -f /etc/pgwatch/metrics/00_helpers/get_psutil_disk_io_total/9.1/metric.sql mydb\n
Note that we're assuming here that we're on a modern Linux system with Python 3 as default. For older systems Python 3 might not be an option though, so you need to change plpython3u to plpythonu and also do the same replacement inside the code of the actual helper functions! Here the rollout_helper.py script with its --python2
flag can be helpful again.
pg_monitor
, that are exactly meant to be used for such cases where we want to give access to all Statistics Collector views without any other \"superuser behaviour\". See here for documentation on such special system roles. Note that currently most out-of-the-box metrics first rely on the helpers as v10 is relatively new still, and only when fetching fails, direct access with the \"Privileged SQL\" is tried.--direct-os-stats
parameter to signal that we can fetch the data for the default psutil*
metrics directly from OS counters. If direct OS fetching fails though, the fallback is still to try via PL/Python wrappers.pg_upgrade
, this could result in error messages thrown. Then just drop those failing helpers on the \"to be upgraded\" cluster and re-create them after the upgrade process.As mentioned above, helper / wrapper functions are not strictly needed, they just provide a bit more information for unprivileged users - thus for developers with no means to install any wrappers as superuser, it's also possible to benefit from pgwatch - for such use cases e.g. the \"unprivileged\" preset metrics profile and the according \"DB overview Unprivileged / Developer\" are a good starting point as it only assumes existence of pg_stat_statements
(which should be available by all cloud providers).
When adding a new \"to be monitored\" entry a source type needs to be selected. Following types are available:
"},{"location":"tutorial/preparing_databases.html#postgres","title":"postgres","text":"Monitor a single database on a single Postgres instance. When using the Web UI and the \"DB name\" field is left empty, there's a one-time operation where all non-template DB names are fetched, prefixed with \"Unique name\" field value and added to monitoring (if not already monitored). Internally monitoring always happens \"per DB\" not \"per cluster\" though.
"},{"location":"tutorial/preparing_databases.html#postgres-continuous-discovery","title":"postgres-continuous-discovery","text":"Monitor a whole (or subset of DB-s) of Postgres cluster / instance. Host information without a DB name needs to be specified and then the pgwatch daemon will periodically scan the cluster and add any found and not yet monitored DBs to monitoring. In this mode it's also possible to specify regular expressions to include/exclude some database names.
"},{"location":"tutorial/preparing_databases.html#pgbouncer","title":"pgbouncer","text":"Use to track metrics from PgBouncer's SHOW STATS
command. In place of the Postgres \"DB name\" the name of the PgBouncer \"pool\" to be monitored must be inserted.
Use to track joint metrics from Pgpool2's SHOW POOL_NODES
and SHOW POOL_PROCESSES
commands. Pgpool2 from version 3.0 is supported.
Patroni is a HA / cluster manager for Postgres that relies on a DCS (Distributed Consensus Store) to store its state. Typically, in such a setup the nodes come and go, and also it should not matter who is currently the master. To make it easier to monitor such dynamic constellations pgwatch supports reading of cluster node info from all supported DCSs (etcd, Zookeeper, Consul), but currently only for simpler cases with no security applied (which is actually the common case in a trusted environment).
"},{"location":"tutorial/preparing_databases.html#patroni-continuous-discovery","title":"patroni-continuous-discovery","text":"As normal patroni DB type but all DBs (or only those matching the regex if any provided) are monitored.
"},{"location":"tutorial/preparing_databases.html#patroni-namespace-discovery","title":"patroni-namespace-discovery","text":"Similar to patroni-continuous-discovery but all Patroni scopes (clusters) of an ETCD namespace are automatically monitored. Optionally regexes on database names still apply if provided.
Notice
All \"continuous\" modes expect access to \"template1\" or \"postgres\" databases of the specified cluster to determine the database names residing there.
"},{"location":"tutorial/upgrading.html","title":"Upgrading","text":"The pgwatch daemon code doesn't need too much maintenance itself (if you're not interested in new features), but the preset metrics, dashboards and the other components that pgwatch relies on, like Grafana, are under very active development and get updates quite regularly so already purely from the security standpoint it would make sense to stay up to date.
We also regularly include new component versions in the Docker images after verifying that they work. If using Docker, you could also choose to build your own images any time some new component versions are released, just increment the version numbers in the Dockerfile.
"},{"location":"tutorial/upgrading.html#updating-to-a-newer-docker-version","title":"Updating to a newer Docker version","text":""},{"location":"tutorial/upgrading.html#without-volumes","title":"Without volumes","text":"If pgwatch container was started in the simplest way possible without volumes, and if previously gathered metrics are not of great importance, and there are no user modified metric or dashboard changes that should be preserved, then the easiest way to get the latest components would be just to launch new container and import the old monitoring config:
# let's backup up the monitored hosts\npsql -p5432 -U pgwatch -d pgwatch -c \"\\copy monitored_db to 'monitored_db.copy'\"\n\n# stop the old container and start a new one ...\ndocker stop ... && docker run ....\n\n# import the monitored hosts\npsql -p5432 -U pgwatch -d pgwatch -c \"\\copy monitored_db from 'monitored_db.copy'\"\n
If metrics data and other settings like custom dashboards need to be preserved then some more steps are needed, but basically it's about pulling Postgres backups and restoring them into the new container.
A tip: to make the restore process easier it would already make sense to mount the host folder with the backups in it on the new container with \"-v \\~/pgwatch_backups:/pgwatch_backups:rw,z\"
when starting the Docker image. Otherwise, one needs to set up SSH or use something like S3 for example. Also note that port 5432 needs to be exposed to take backups of Postgres outside of Docker.
To make updates a bit easier, the preferred way to launch pgwatch should be to use Docker volumes for each individual component - see the Installing using Docker chapter for details. Then one can just stop the old container and start a new one, re-using the volumes.
With some releases though, updating to newer version might additionally still require manual rollout of Config DB schema migrations scripts, so always check the release notes for hints on that or just go to the \"pgwatch/sql/migrations\"
folder and execute all SQL scripts that have a higher version than the old pgwatch container. Error messages like \"missing columns\" or \"wrong datatype\" will also hint at that, after launching with a new image. FYI - such SQL \"patches\" are generally not provided for metric updates, nor dashboard changes, and they need to be updated separately.
For a custom installation there's quite some freedom in doing updates - as components (Grafana, PostgreSQL) are loosely coupled, they can be updated any time without worrying too much about the other components. Only \"tightly coupled\" components are the pgwatch metrics collector, config DB and the optional Web UI - if the pgwatch config is kept in the database. If YAML based approach is used, then things are even more simple - the pgwatch daemon can be updated any time as YAML schema has default values for everything and there are no other \"tightly coupled\" components like the Web UI.
"},{"location":"tutorial/upgrading.html#updating-grafana","title":"Updating Grafana","text":"The update process for Grafana looks pretty much like the installation so take a look at the according chapter. If using Grafana package repository it should happen automatically along with other system packages. Grafana has a built-in database schema migrator, so updating the binaries and restarting is enough.
"},{"location":"tutorial/upgrading.html#updating-grafana-dashboards","title":"Updating Grafana dashboards","text":"There are no update or migration scripts for the built-in Grafana dashboards as it would break possible user applied changes. If you know that there are no user changes, then one can just delete or rename the existing ones in bulk and import the latest JSON definitions. See here for some more advice on how to manage dashboards.
"},{"location":"tutorial/upgrading.html#updating-the-config-metrics-db-version","title":"Updating the config / metrics DB version","text":"Database updates can be quite complex, with many steps, so it makes sense to follow the manufacturer's instructions here.
For PostgreSQL one should distinguish between minor version updates and major version upgrades. Minor updates are quite straightforward and problem-free, consisting of running something like:
apt update && apt install postgresql\nsudo systemctl restart postgresql\n
For PostgreSQL major version upgrades one should read through the according release notes (e.g. here) and be prepared for the unavoidable downtime.
"},{"location":"tutorial/upgrading.html#updating-the-pgwatch-schema","title":"Updating the pgwatch schema","text":"This is the pgwatch specific part, with some coupling between the following components - Config DB SQL schema, metrics collector, and the optional Web UI.
Here one should check from the CHANGELOG if pgwatch schema needs updating. If yes, then manual applying of schema diffs is required before running the new gatherer or Web UI. If no, i.e. no schema changes, all components can be updated independently in random order.
pgwatch --config=postgresql://localhost/pgwatch --upgrade\n
"},{"location":"tutorial/upgrading.html#updating-the-metrics-collector","title":"Updating the metrics collector","text":"Compile or install the gatherer from RPM / DEB / tarball packages. See the Custom installation chapter for details.
If using a SystemD service file to auto-start the collector then you might want to also check for possible updates on the template there - /etc/pgwatch/startup-scripts/pgwatch.service
.
In the YAML mode you always get new SQL definitions for the built-in metrics automatically when refreshing the sources via GitHub or pre-built packages, but with Config DB approach one needs to do it manually. Given that there are no user added metrics, it's simple enough though - just delete all old ones and re-insert everything from the latest metric definition SQL file.
pg_dump -t pgwatch.metric pgwatch > old_metric.sql # a just-in-case backup\npsql -c \"truncate pgwatch.metric\" pgwatch\npsql -f /etc/pgwatch/sql/config_store/metric_definitions.sql pgwatch\n
Warning
If you have added some own custom metrics be sure not to delete or truncate them!
"}]} \ No newline at end of file