Merge branch 'MON-494_refactor_auto_update' into 'master'

Resolve MON-494 "Refactor auto update"

Closes MON-494

See merge request claranet/pt-monitoring/projects/datadog/terraform/monitors!95
This commit is contained in:
Quentin Manfroi 2019-08-20 18:38:17 +02:00
commit 88e9a32d39
54 changed files with 800 additions and 712 deletions

View File

@ -9,7 +9,7 @@ auto_update:
image: claranet/datadog-terraform:latest
stage: test
script:
- ./scripts/auto_update.sh ./
- ./scripts/auto_update.sh
- git status
- git diff --exit-code
tags:

View File

@ -10,14 +10,7 @@ To contribute you will need to [report an issue](https://confluence.fr.clara.net
If you would like to resolve an issue or implement new monitors you must follow our [best practices](https://confluence.fr.clara.net/display/DAT/Templates+monitors).
After any change on this repo, you need to run the `./scripts/auto_update.sh ./` command to make sure all is up to date otherwise the CI pipeline will fail:
- the parameter will limit the scripts execution on a specific path on the repository
- on linux system it is possible to run the script directly while `terraform` and `terraform-docs` commands are available in your PATH
- else you can use [the same docker image as the CI](https://hub.docker.com/r/claranet/datadog-terraform) with docker which is available on every platforms
```
docker run --rm -v "$PWD:/work" claranet/datadog-terraform /work/scripts/auto_update.sh ./
```
After any change, you will need to run the [auto update scripts](./scripts/README.md) to make sure all is up to date otherwise the CI pipeline will fail.
## Important notes ##

View File

@ -1,10 +1,10 @@
output "nginx_ingress_too_many_5xx_id" {
description = "id for monitor nginx_ingress_too_many_5xx"
value = datadog_monitor.nginx_ingress_too_many_5xx.*.id
}
output "nginx_ingress_too_many_4xx_id" {
description = "id for monitor nginx_ingress_too_many_4xx"
value = datadog_monitor.nginx_ingress_too_many_4xx.*.id
}
output "nginx_ingress_too_many_5xx_id" {
description = "id for monitor nginx_ingress_too_many_5xx"
value = datadog_monitor.nginx_ingress_too_many_5xx.*.id
}

View File

@ -1,21 +1,11 @@
output "disk_pressure_id" {
description = "id for monitor disk_pressure"
value = datadog_monitor.disk_pressure.*.id
}
output "disk_out_id" {
description = "id for monitor disk_out"
value = datadog_monitor.disk_out.*.id
}
output "memory_pressure_id" {
description = "id for monitor memory_pressure"
value = datadog_monitor.memory_pressure.*.id
}
output "ready_id" {
description = "id for monitor ready"
value = datadog_monitor.ready.*.id
output "disk_pressure_id" {
description = "id for monitor disk_pressure"
value = datadog_monitor.disk_pressure.*.id
}
output "kubelet_ping_id" {
@ -28,9 +18,9 @@ output "kubelet_syncloop_id" {
value = datadog_monitor.kubelet_syncloop.*.id
}
output "unregister_net_device_id" {
description = "id for monitor unregister_net_device"
value = datadog_monitor.unregister_net_device.*.id
output "memory_pressure_id" {
description = "id for monitor memory_pressure"
value = datadog_monitor.memory_pressure.*.id
}
output "node_unschedulable_id" {
@ -38,9 +28,14 @@ output "node_unschedulable_id" {
value = datadog_monitor.node_unschedulable.*.id
}
output "volume_space_id" {
description = "id for monitor volume_space"
value = datadog_monitor.volume_space.*.id
output "ready_id" {
description = "id for monitor ready"
value = datadog_monitor.ready.*.id
}
output "unregister_net_device_id" {
description = "id for monitor unregister_net_device"
value = datadog_monitor.unregister_net_device.*.id
}
output "volume_inodes_id" {
@ -48,3 +43,8 @@ output "volume_inodes_id" {
value = datadog_monitor.volume_inodes.*.id
}
output "volume_space_id" {
description = "id for monitor volume_space"
value = datadog_monitor.volume_space.*.id
}

View File

@ -1,13 +1,13 @@
output "pod_phase_status_id" {
description = "id for monitor pod_phase_status"
value = datadog_monitor.pod_phase_status.*.id
}
output "error_id" {
description = "id for monitor error"
value = datadog_monitor.error.*.id
}
output "pod_phase_status_id" {
description = "id for monitor pod_phase_status"
value = datadog_monitor.pod_phase_status.*.id
}
output "terminated_id" {
description = "id for monitor terminated"
value = datadog_monitor.terminated.*.id

View File

@ -1,25 +1,25 @@
output "job_id" {
description = "id for monitor job"
value = datadog_monitor.job.*.id
}
output "cronjob_id" {
description = "id for monitor cronjob"
value = datadog_monitor.cronjob.*.id
}
output "job_id" {
description = "id for monitor job"
value = datadog_monitor.job.*.id
}
output "replica_available_id" {
description = "id for monitor replica_available"
value = datadog_monitor.replica_available.*.id
}
output "replica_ready_id" {
description = "id for monitor replica_ready"
value = datadog_monitor.replica_ready.*.id
}
output "replica_current_id" {
description = "id for monitor replica_current"
value = datadog_monitor.replica_current.*.id
}
output "replica_ready_id" {
description = "id for monitor replica_ready"
value = datadog_monitor.replica_ready.*.id
}

View File

@ -1,11 +1,6 @@
output "ALB_no_healthy_instances_id" {
description = "id for monitor ALB_no_healthy_instances"
value = datadog_monitor.ALB_no_healthy_instances.*.id
}
output "ALB_latency_id" {
description = "id for monitor ALB_latency"
value = datadog_monitor.ALB_latency.*.id
output "ALB_httpcode_4xx_id" {
description = "id for monitor ALB_httpcode_4xx"
value = datadog_monitor.ALB_httpcode_4xx.*.id
}
output "ALB_httpcode_5xx_id" {
@ -13,9 +8,9 @@ output "ALB_httpcode_5xx_id" {
value = datadog_monitor.ALB_httpcode_5xx.*.id
}
output "ALB_httpcode_4xx_id" {
description = "id for monitor ALB_httpcode_4xx"
value = datadog_monitor.ALB_httpcode_4xx.*.id
output "ALB_httpcode_target_4xx_id" {
description = "id for monitor ALB_httpcode_target_4xx"
value = datadog_monitor.ALB_httpcode_target_4xx.*.id
}
output "ALB_httpcode_target_5xx_id" {
@ -23,8 +18,13 @@ output "ALB_httpcode_target_5xx_id" {
value = datadog_monitor.ALB_httpcode_target_5xx.*.id
}
output "ALB_httpcode_target_4xx_id" {
description = "id for monitor ALB_httpcode_target_4xx"
value = datadog_monitor.ALB_httpcode_target_4xx.*.id
output "ALB_latency_id" {
description = "id for monitor ALB_latency"
value = datadog_monitor.ALB_latency.*.id
}
output "ALB_no_healthy_instances_id" {
description = "id for monitor ALB_no_healthy_instances"
value = datadog_monitor.ALB_no_healthy_instances.*.id
}

View File

@ -3,13 +3,13 @@ output "API_Gateway_latency_id" {
value = datadog_monitor.API_Gateway_latency.*.id
}
output "API_http_5xx_errors_count_id" {
description = "id for monitor API_http_5xx_errors_count"
value = datadog_monitor.API_http_5xx_errors_count.*.id
}
output "API_http_4xx_errors_count_id" {
description = "id for monitor API_http_4xx_errors_count"
value = datadog_monitor.API_http_4xx_errors_count.*.id
}
output "API_http_5xx_errors_count_id" {
description = "id for monitor API_http_5xx_errors_count"
value = datadog_monitor.API_http_5xx_errors_count.*.id
}

View File

@ -3,6 +3,16 @@ output "elasticache_eviction_id" {
value = datadog_monitor.elasticache_eviction.*.id
}
output "elasticache_eviction_growing_id" {
description = "id for monitor elasticache_eviction_growing"
value = datadog_monitor.elasticache_eviction_growing.*.id
}
output "elasticache_free_memory_id" {
description = "id for monitor elasticache_free_memory"
value = datadog_monitor.elasticache_free_memory.*.id
}
output "elasticache_max_connection_id" {
description = "id for monitor elasticache_max_connection"
value = datadog_monitor.elasticache_max_connection.*.id
@ -18,13 +28,3 @@ output "elasticache_swap_id" {
value = datadog_monitor.elasticache_swap.*.id
}
output "elasticache_free_memory_id" {
description = "id for monitor elasticache_free_memory"
value = datadog_monitor.elasticache_free_memory.*.id
}
output "elasticache_eviction_growing_id" {
description = "id for monitor elasticache_eviction_growing"
value = datadog_monitor.elasticache_eviction_growing.*.id
}

View File

@ -1,10 +1,10 @@
output "memcached_get_hits_id" {
description = "id for monitor memcached_get_hits"
value = datadog_monitor.memcached_get_hits.*.id
}
output "memcached_cpu_high_id" {
description = "id for monitor memcached_cpu_high"
value = datadog_monitor.memcached_cpu_high.*.id
}
output "memcached_get_hits_id" {
description = "id for monitor memcached_get_hits"
value = datadog_monitor.memcached_get_hits.*.id
}

View File

@ -3,6 +3,11 @@ output "redis_cache_hits_id" {
value = datadog_monitor.redis_cache_hits.*.id
}
output "redis_commands_id" {
description = "id for monitor redis_commands"
value = datadog_monitor.redis_commands.*.id
}
output "redis_cpu_high_id" {
description = "id for monitor redis_cpu_high"
value = datadog_monitor.redis_cpu_high.*.id
@ -13,8 +18,3 @@ output "redis_replication_lag_id" {
value = datadog_monitor.redis_replication_lag.*.id
}
output "redis_commands_id" {
description = "id for monitor redis_commands"
value = datadog_monitor.redis_commands.*.id
}

View File

@ -3,13 +3,13 @@ output "es_cluster_status_id" {
value = datadog_monitor.es_cluster_status.*.id
}
output "es_free_space_low_id" {
description = "id for monitor es_free_space_low"
value = datadog_monitor.es_free_space_low.*.id
}
output "es_cpu_90_15min_id" {
description = "id for monitor es_cpu_90_15min"
value = datadog_monitor.es_cpu_90_15min.*.id
}
output "es_free_space_low_id" {
description = "id for monitor es_free_space_low"
value = datadog_monitor.es_free_space_low.*.id
}

View File

@ -1,3 +1,8 @@
output "ELB_backend_latency_id" {
description = "id for monitor ELB_backend_latency"
value = datadog_monitor.ELB_backend_latency.*.id
}
output "ELB_no_healthy_instances_id" {
description = "id for monitor ELB_no_healthy_instances"
value = datadog_monitor.ELB_no_healthy_instances.*.id
@ -8,23 +13,18 @@ output "ELB_too_much_4xx_id" {
value = datadog_monitor.ELB_too_much_4xx.*.id
}
output "ELB_too_much_5xx_id" {
description = "id for monitor ELB_too_much_5xx"
value = datadog_monitor.ELB_too_much_5xx.*.id
}
output "ELB_too_much_4xx_backend_id" {
description = "id for monitor ELB_too_much_4xx_backend"
value = datadog_monitor.ELB_too_much_4xx_backend.*.id
}
output "ELB_too_much_5xx_id" {
description = "id for monitor ELB_too_much_5xx"
value = datadog_monitor.ELB_too_much_5xx.*.id
}
output "ELB_too_much_5xx_backend_id" {
description = "id for monitor ELB_too_much_5xx_backend"
value = datadog_monitor.ELB_too_much_5xx_backend.*.id
}
output "ELB_backend_latency_id" {
description = "id for monitor ELB_backend_latency"
value = datadog_monitor.ELB_backend_latency.*.id
}

View File

@ -1,8 +1,3 @@
output "apimgt_status_id" {
description = "id for monitor apimgt_status"
value = datadog_monitor.apimgt_status.*.id
}
output "apimgt_failed_requests_id" {
description = "id for monitor apimgt_failed_requests"
value = datadog_monitor.apimgt_failed_requests.*.id
@ -13,9 +8,9 @@ output "apimgt_other_requests_id" {
value = datadog_monitor.apimgt_other_requests.*.id
}
output "apimgt_unauthorized_requests_id" {
description = "id for monitor apimgt_unauthorized_requests"
value = datadog_monitor.apimgt_unauthorized_requests.*.id
output "apimgt_status_id" {
description = "id for monitor apimgt_status"
value = datadog_monitor.apimgt_status.*.id
}
output "apimgt_successful_requests_id" {
@ -23,3 +18,8 @@ output "apimgt_successful_requests_id" {
value = datadog_monitor.apimgt_successful_requests.*.id
}
output "apimgt_unauthorized_requests_id" {
description = "id for monitor apimgt_unauthorized_requests"
value = datadog_monitor.apimgt_unauthorized_requests.*.id
}

View File

@ -1,11 +1,6 @@
output "appservices_response_time_id" {
description = "id for monitor appservices_response_time"
value = datadog_monitor.appservices_response_time.*.id
}
output "appservices_memory_usage_count_id" {
description = "id for monitor appservices_memory_usage_count"
value = datadog_monitor.appservices_memory_usage_count.*.id
output "appservices_http_4xx_errors_count_id" {
description = "id for monitor appservices_http_4xx_errors_count"
value = datadog_monitor.appservices_http_4xx_errors_count.*.id
}
output "appservices_http_5xx_errors_count_id" {
@ -13,16 +8,21 @@ output "appservices_http_5xx_errors_count_id" {
value = datadog_monitor.appservices_http_5xx_errors_count.*.id
}
output "appservices_http_4xx_errors_count_id" {
description = "id for monitor appservices_http_4xx_errors_count"
value = datadog_monitor.appservices_http_4xx_errors_count.*.id
}
output "appservices_http_success_status_rate_id" {
description = "id for monitor appservices_http_success_status_rate"
value = datadog_monitor.appservices_http_success_status_rate.*.id
}
output "appservices_memory_usage_count_id" {
description = "id for monitor appservices_memory_usage_count"
value = datadog_monitor.appservices_memory_usage_count.*.id
}
output "appservices_response_time_id" {
description = "id for monitor appservices_response_time"
value = datadog_monitor.appservices_response_time.*.id
}
output "appservices_status_id" {
description = "id for monitor appservices_status"
value = datadog_monitor.appservices_status.*.id

View File

@ -1,8 +1,3 @@
output "cosmos_db_status_id" {
description = "id for monitor cosmos_db_status"
value = datadog_monitor.cosmos_db_status.*.id
}
output "cosmos_db_4xx_requests_id" {
description = "id for monitor cosmos_db_4xx_requests"
value = datadog_monitor.cosmos_db_4xx_requests.*.id
@ -18,3 +13,8 @@ output "cosmos_db_scaling_id" {
value = datadog_monitor.cosmos_db_scaling.*.id
}
output "cosmos_db_status_id" {
description = "id for monitor cosmos_db_status"
value = datadog_monitor.cosmos_db_status.*.id
}

View File

@ -1,13 +1,13 @@
output "eventgrid_no_successful_message_id" {
description = "id for monitor eventgrid_no_successful_message"
value = datadog_monitor.eventgrid_no_successful_message.*.id
}
output "eventgrid_failed_messages_id" {
description = "id for monitor eventgrid_failed_messages"
value = datadog_monitor.eventgrid_failed_messages.*.id
}
output "eventgrid_no_successful_message_id" {
description = "id for monitor eventgrid_no_successful_message"
value = datadog_monitor.eventgrid_no_successful_message.*.id
}
output "eventgrid_unmatched_events_id" {
description = "id for monitor eventgrid_unmatched_events"
value = datadog_monitor.eventgrid_unmatched_events.*.id

View File

@ -1,6 +1,6 @@
output "eventhub_status_id" {
description = "id for monitor eventhub_status"
value = datadog_monitor.eventhub_status.*.id
output "eventhub_errors_id" {
description = "id for monitor eventhub_errors"
value = datadog_monitor.eventhub_errors.*.id
}
output "eventhub_failed_requests_id" {
@ -8,8 +8,8 @@ output "eventhub_failed_requests_id" {
value = datadog_monitor.eventhub_failed_requests.*.id
}
output "eventhub_errors_id" {
description = "id for monitor eventhub_errors"
value = datadog_monitor.eventhub_errors.*.id
output "eventhub_status_id" {
description = "id for monitor eventhub_status"
value = datadog_monitor.eventhub_status.*.id
}

View File

@ -1,8 +1,3 @@
output "function_http_5xx_errors_rate_id" {
description = "id for monitor function_http_5xx_errors_rate"
value = datadog_monitor.function_http_5xx_errors_rate.*.id
}
output "function_high_connections_count_id" {
description = "id for monitor function_high_connections_count"
value = datadog_monitor.function_high_connections_count.*.id
@ -13,3 +8,8 @@ output "function_high_threads_count_id" {
value = datadog_monitor.function_high_threads_count.*.id
}
output "function_http_5xx_errors_rate_id" {
description = "id for monitor function_http_5xx_errors_rate"
value = datadog_monitor.function_http_5xx_errors_rate.*.id
}

View File

@ -1,28 +1,8 @@
output "too_many_jobs_failed_id" {
description = "id for monitor too_many_jobs_failed"
value = datadog_monitor.too_many_jobs_failed.*.id
}
output "too_many_list_jobs_failed_id" {
description = "id for monitor too_many_list_jobs_failed"
value = datadog_monitor.too_many_list_jobs_failed.*.id
}
output "too_many_query_jobs_failed_id" {
description = "id for monitor too_many_query_jobs_failed"
value = datadog_monitor.too_many_query_jobs_failed.*.id
}
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}
output "total_devices_id" {
description = "id for monitor total_devices"
value = datadog_monitor.total_devices.*.id
}
output "too_many_c2d_methods_failed_id" {
description = "id for monitor too_many_c2d_methods_failed"
value = datadog_monitor.too_many_c2d_methods_failed.*.id
@ -38,6 +18,26 @@ output "too_many_c2d_twin_update_failed_id" {
value = datadog_monitor.too_many_c2d_twin_update_failed.*.id
}
output "too_many_d2c_telemetry_egress_dropped_id" {
description = "id for monitor too_many_d2c_telemetry_egress_dropped"
value = datadog_monitor.too_many_d2c_telemetry_egress_dropped.*.id
}
output "too_many_d2c_telemetry_egress_invalid_id" {
description = "id for monitor too_many_d2c_telemetry_egress_invalid"
value = datadog_monitor.too_many_d2c_telemetry_egress_invalid.*.id
}
output "too_many_d2c_telemetry_egress_orphaned_id" {
description = "id for monitor too_many_d2c_telemetry_egress_orphaned"
value = datadog_monitor.too_many_d2c_telemetry_egress_orphaned.*.id
}
output "too_many_d2c_telemetry_ingress_nosent_id" {
description = "id for monitor too_many_d2c_telemetry_ingress_nosent"
value = datadog_monitor.too_many_d2c_telemetry_ingress_nosent.*.id
}
output "too_many_d2c_twin_read_failed_id" {
description = "id for monitor too_many_d2c_twin_read_failed"
value = datadog_monitor.too_many_d2c_twin_read_failed.*.id
@ -48,23 +48,23 @@ output "too_many_d2c_twin_update_failed_id" {
value = datadog_monitor.too_many_d2c_twin_update_failed.*.id
}
output "too_many_d2c_telemetry_egress_dropped_id" {
description = "id for monitor too_many_d2c_telemetry_egress_dropped"
value = datadog_monitor.too_many_d2c_telemetry_egress_dropped.*.id
output "too_many_jobs_failed_id" {
description = "id for monitor too_many_jobs_failed"
value = datadog_monitor.too_many_jobs_failed.*.id
}
output "too_many_d2c_telemetry_egress_orphaned_id" {
description = "id for monitor too_many_d2c_telemetry_egress_orphaned"
value = datadog_monitor.too_many_d2c_telemetry_egress_orphaned.*.id
output "too_many_list_jobs_failed_id" {
description = "id for monitor too_many_list_jobs_failed"
value = datadog_monitor.too_many_list_jobs_failed.*.id
}
output "too_many_d2c_telemetry_egress_invalid_id" {
description = "id for monitor too_many_d2c_telemetry_egress_invalid"
value = datadog_monitor.too_many_d2c_telemetry_egress_invalid.*.id
output "too_many_query_jobs_failed_id" {
description = "id for monitor too_many_query_jobs_failed"
value = datadog_monitor.too_many_query_jobs_failed.*.id
}
output "too_many_d2c_telemetry_ingress_nosent_id" {
description = "id for monitor too_many_d2c_telemetry_ingress_nosent"
value = datadog_monitor.too_many_d2c_telemetry_ingress_nosent.*.id
output "total_devices_id" {
description = "id for monitor total_devices"
value = datadog_monitor.total_devices.*.id
}

View File

@ -1,6 +1,6 @@
output "keyvault_status_id" {
description = "id for monitor keyvault_status"
value = datadog_monitor.keyvault_status.*.id
output "keyvault_api_latency_id" {
description = "id for monitor keyvault_api_latency"
value = datadog_monitor.keyvault_api_latency.*.id
}
output "keyvault_api_result_id" {
@ -8,8 +8,8 @@ output "keyvault_api_result_id" {
value = datadog_monitor.keyvault_api_result.*.id
}
output "keyvault_api_latency_id" {
description = "id for monitor keyvault_api_latency"
value = datadog_monitor.keyvault_api_latency.*.id
output "keyvault_status_id" {
description = "id for monitor keyvault_status"
value = datadog_monitor.keyvault_status.*.id
}

View File

@ -3,11 +3,6 @@ output "postgresql_cpu_usage_id" {
value = datadog_monitor.postgresql_cpu_usage.*.id
}
output "postgresql_no_connection_id" {
description = "id for monitor postgresql_no_connection"
value = datadog_monitor.postgresql_no_connection.*.id
}
output "postgresql_free_storage_id" {
description = "id for monitor postgresql_free_storage"
value = datadog_monitor.postgresql_free_storage.*.id
@ -23,3 +18,8 @@ output "postgresql_memory_usage_id" {
value = datadog_monitor.postgresql_memory_usage.*.id
}
output "postgresql_no_connection_id" {
description = "id for monitor postgresql_no_connection"
value = datadog_monitor.postgresql_no_connection.*.id
}

View File

@ -1,8 +1,3 @@
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}
output "evictedkeys_id" {
description = "id for monitor evictedkeys"
value = datadog_monitor.evictedkeys.*.id
@ -18,3 +13,8 @@ output "server_load_id" {
value = datadog_monitor.server_load.*.id
}
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}

View File

@ -1,8 +1,3 @@
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}
output "cpu_percentage_id" {
description = "id for monitor cpu_percentage"
value = datadog_monitor.cpu_percentage.*.id
@ -13,3 +8,8 @@ output "memory_percentage_id" {
value = datadog_monitor.memory_percentage.*.id
}
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}

View File

@ -1,20 +1,20 @@
output "servicebus_status_id" {
description = "id for monitor servicebus_status"
value = datadog_monitor.servicebus_status.*.id
}
output "service_bus_no_active_connections_id" {
description = "id for monitor service_bus_no_active_connections"
value = datadog_monitor.service_bus_no_active_connections.*.id
}
output "service_bus_user_errors_id" {
description = "id for monitor service_bus_user_errors"
value = datadog_monitor.service_bus_user_errors.*.id
}
output "service_bus_server_errors_id" {
description = "id for monitor service_bus_server_errors"
value = datadog_monitor.service_bus_server_errors.*.id
}
output "service_bus_user_errors_id" {
description = "id for monitor service_bus_user_errors"
value = datadog_monitor.service_bus_user_errors.*.id
}
output "servicebus_status_id" {
description = "id for monitor servicebus_status"
value = datadog_monitor.servicebus_status.*.id
}

View File

@ -1,25 +1,25 @@
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}
output "sql-database_cpu_id" {
description = "id for monitor sql-database_cpu"
value = datadog_monitor.sql-database_cpu.*.id
}
output "sql-database_free_space_low_id" {
description = "id for monitor sql-database_free_space_low"
value = datadog_monitor.sql-database_free_space_low.*.id
}
output "sql-database_dtu_consumption_high_id" {
description = "id for monitor sql-database_dtu_consumption_high"
value = datadog_monitor.sql-database_dtu_consumption_high.*.id
}
output "sql-database_deadlocks_count_id" {
description = "id for monitor sql-database_deadlocks_count"
value = datadog_monitor.sql-database_deadlocks_count.*.id
}
output "sql-database_dtu_consumption_high_id" {
description = "id for monitor sql-database_dtu_consumption_high"
value = datadog_monitor.sql-database_dtu_consumption_high.*.id
}
output "sql-database_free_space_low_id" {
description = "id for monitor sql-database_free_space_low"
value = datadog_monitor.sql-database_free_space_low.*.id
}
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
}

View File

@ -3,13 +3,13 @@ output "sql_elasticpool_cpu_id" {
value = datadog_monitor.sql_elasticpool_cpu.*.id
}
output "sql_elasticpool_free_space_low_id" {
description = "id for monitor sql_elasticpool_free_space_low"
value = datadog_monitor.sql_elasticpool_free_space_low.*.id
}
output "sql_elasticpool_dtu_consumption_high_id" {
description = "id for monitor sql_elasticpool_dtu_consumption_high"
value = datadog_monitor.sql_elasticpool_dtu_consumption_high.*.id
}
output "sql_elasticpool_free_space_low_id" {
description = "id for monitor sql_elasticpool_free_space_low"
value = datadog_monitor.sql_elasticpool_free_space_low.*.id
}

View File

@ -1,126 +1,6 @@
output "storage_status_id" {
description = "id for monitor storage_status"
value = datadog_monitor.storage_status.*.id
}
output "blobservices_requests_error_id" {
description = "id for monitor blobservices_requests_error"
value = datadog_monitor.blobservices_requests_error.*.id
}
output "fileservices_requests_error_id" {
description = "id for monitor fileservices_requests_error"
value = datadog_monitor.fileservices_requests_error.*.id
}
output "queueservices_requests_error_id" {
description = "id for monitor queueservices_requests_error"
value = datadog_monitor.queueservices_requests_error.*.id
}
output "tableservices_requests_error_id" {
description = "id for monitor tableservices_requests_error"
value = datadog_monitor.tableservices_requests_error.*.id
}
output "blobservices_latency_id" {
description = "id for monitor blobservices_latency"
value = datadog_monitor.blobservices_latency.*.id
}
output "fileservices_latency_id" {
description = "id for monitor fileservices_latency"
value = datadog_monitor.fileservices_latency.*.id
}
output "queueservices_latency_id" {
description = "id for monitor queueservices_latency"
value = datadog_monitor.queueservices_latency.*.id
}
output "tableservices_latency_id" {
description = "id for monitor tableservices_latency"
value = datadog_monitor.tableservices_latency.*.id
}
output "blob_timeout_error_requests_id" {
description = "id for monitor blob_timeout_error_requests"
value = datadog_monitor.blob_timeout_error_requests.*.id
}
output "file_timeout_error_requests_id" {
description = "id for monitor file_timeout_error_requests"
value = datadog_monitor.file_timeout_error_requests.*.id
}
output "queue_timeout_error_requests_id" {
description = "id for monitor queue_timeout_error_requests"
value = datadog_monitor.queue_timeout_error_requests.*.id
}
output "table_timeout_error_requests_id" {
description = "id for monitor table_timeout_error_requests"
value = datadog_monitor.table_timeout_error_requests.*.id
}
output "blob_network_error_requests_id" {
description = "id for monitor blob_network_error_requests"
value = datadog_monitor.blob_network_error_requests.*.id
}
output "file_network_error_requests_id" {
description = "id for monitor file_network_error_requests"
value = datadog_monitor.file_network_error_requests.*.id
}
output "queue_network_error_requests_id" {
description = "id for monitor queue_network_error_requests"
value = datadog_monitor.queue_network_error_requests.*.id
}
output "table_network_error_requests_id" {
description = "id for monitor table_network_error_requests"
value = datadog_monitor.table_network_error_requests.*.id
}
output "blob_throttling_error_requests_id" {
description = "id for monitor blob_throttling_error_requests"
value = datadog_monitor.blob_throttling_error_requests.*.id
}
output "file_throttling_error_requests_id" {
description = "id for monitor file_throttling_error_requests"
value = datadog_monitor.file_throttling_error_requests.*.id
}
output "queue_throttling_error_requests_id" {
description = "id for monitor queue_throttling_error_requests"
value = datadog_monitor.queue_throttling_error_requests.*.id
}
output "table_throttling_error_requests_id" {
description = "id for monitor table_throttling_error_requests"
value = datadog_monitor.table_throttling_error_requests.*.id
}
output "blob_server_other_error_requests_id" {
description = "id for monitor blob_server_other_error_requests"
value = datadog_monitor.blob_server_other_error_requests.*.id
}
output "file_server_other_error_requests_id" {
description = "id for monitor file_server_other_error_requests"
value = datadog_monitor.file_server_other_error_requests.*.id
}
output "queue_server_other_error_requests_id" {
description = "id for monitor queue_server_other_error_requests"
value = datadog_monitor.queue_server_other_error_requests.*.id
}
output "table_server_other_error_requests_id" {
description = "id for monitor table_server_other_error_requests"
value = datadog_monitor.table_server_other_error_requests.*.id
output "blob_authorization_error_requests_id" {
description = "id for monitor blob_authorization_error_requests"
value = datadog_monitor.blob_authorization_error_requests.*.id
}
output "blob_client_other_error_requests_id" {
@ -128,24 +8,34 @@ output "blob_client_other_error_requests_id" {
value = datadog_monitor.blob_client_other_error_requests.*.id
}
output "file_client_other_error_requests_id" {
description = "id for monitor file_client_other_error_requests"
value = datadog_monitor.file_client_other_error_requests.*.id
output "blob_network_error_requests_id" {
description = "id for monitor blob_network_error_requests"
value = datadog_monitor.blob_network_error_requests.*.id
}
output "queue_client_other_error_requests_id" {
description = "id for monitor queue_client_other_error_requests"
value = datadog_monitor.queue_client_other_error_requests.*.id
output "blob_server_other_error_requests_id" {
description = "id for monitor blob_server_other_error_requests"
value = datadog_monitor.blob_server_other_error_requests.*.id
}
output "table_client_other_error_requests_id" {
description = "id for monitor table_client_other_error_requests"
value = datadog_monitor.table_client_other_error_requests.*.id
output "blob_throttling_error_requests_id" {
description = "id for monitor blob_throttling_error_requests"
value = datadog_monitor.blob_throttling_error_requests.*.id
}
output "blob_authorization_error_requests_id" {
description = "id for monitor blob_authorization_error_requests"
value = datadog_monitor.blob_authorization_error_requests.*.id
output "blob_timeout_error_requests_id" {
description = "id for monitor blob_timeout_error_requests"
value = datadog_monitor.blob_timeout_error_requests.*.id
}
output "blobservices_latency_id" {
description = "id for monitor blobservices_latency"
value = datadog_monitor.blobservices_latency.*.id
}
output "blobservices_requests_error_id" {
description = "id for monitor blobservices_requests_error"
value = datadog_monitor.blobservices_requests_error.*.id
}
output "file_authorization_error_requests_id" {
@ -153,13 +43,123 @@ output "file_authorization_error_requests_id" {
value = datadog_monitor.file_authorization_error_requests.*.id
}
output "file_client_other_error_requests_id" {
description = "id for monitor file_client_other_error_requests"
value = datadog_monitor.file_client_other_error_requests.*.id
}
output "file_network_error_requests_id" {
description = "id for monitor file_network_error_requests"
value = datadog_monitor.file_network_error_requests.*.id
}
output "file_server_other_error_requests_id" {
description = "id for monitor file_server_other_error_requests"
value = datadog_monitor.file_server_other_error_requests.*.id
}
output "file_throttling_error_requests_id" {
description = "id for monitor file_throttling_error_requests"
value = datadog_monitor.file_throttling_error_requests.*.id
}
output "file_timeout_error_requests_id" {
description = "id for monitor file_timeout_error_requests"
value = datadog_monitor.file_timeout_error_requests.*.id
}
output "fileservices_latency_id" {
description = "id for monitor fileservices_latency"
value = datadog_monitor.fileservices_latency.*.id
}
output "fileservices_requests_error_id" {
description = "id for monitor fileservices_requests_error"
value = datadog_monitor.fileservices_requests_error.*.id
}
output "queue_authorization_error_requests_id" {
description = "id for monitor queue_authorization_error_requests"
value = datadog_monitor.queue_authorization_error_requests.*.id
}
output "queue_client_other_error_requests_id" {
description = "id for monitor queue_client_other_error_requests"
value = datadog_monitor.queue_client_other_error_requests.*.id
}
output "queue_network_error_requests_id" {
description = "id for monitor queue_network_error_requests"
value = datadog_monitor.queue_network_error_requests.*.id
}
output "queue_server_other_error_requests_id" {
description = "id for monitor queue_server_other_error_requests"
value = datadog_monitor.queue_server_other_error_requests.*.id
}
output "queue_throttling_error_requests_id" {
description = "id for monitor queue_throttling_error_requests"
value = datadog_monitor.queue_throttling_error_requests.*.id
}
output "queue_timeout_error_requests_id" {
description = "id for monitor queue_timeout_error_requests"
value = datadog_monitor.queue_timeout_error_requests.*.id
}
output "queueservices_latency_id" {
description = "id for monitor queueservices_latency"
value = datadog_monitor.queueservices_latency.*.id
}
output "queueservices_requests_error_id" {
description = "id for monitor queueservices_requests_error"
value = datadog_monitor.queueservices_requests_error.*.id
}
output "storage_status_id" {
description = "id for monitor storage_status"
value = datadog_monitor.storage_status.*.id
}
output "table_authorization_error_requests_id" {
description = "id for monitor table_authorization_error_requests"
value = datadog_monitor.table_authorization_error_requests.*.id
}
output "table_client_other_error_requests_id" {
description = "id for monitor table_client_other_error_requests"
value = datadog_monitor.table_client_other_error_requests.*.id
}
output "table_network_error_requests_id" {
description = "id for monitor table_network_error_requests"
value = datadog_monitor.table_network_error_requests.*.id
}
output "table_server_other_error_requests_id" {
description = "id for monitor table_server_other_error_requests"
value = datadog_monitor.table_server_other_error_requests.*.id
}
output "table_throttling_error_requests_id" {
description = "id for monitor table_throttling_error_requests"
value = datadog_monitor.table_throttling_error_requests.*.id
}
output "table_timeout_error_requests_id" {
description = "id for monitor table_timeout_error_requests"
value = datadog_monitor.table_timeout_error_requests.*.id
}
output "tableservices_latency_id" {
description = "id for monitor tableservices_latency"
value = datadog_monitor.tableservices_latency.*.id
}
output "tableservices_requests_error_id" {
description = "id for monitor tableservices_requests_error"
value = datadog_monitor.tableservices_requests_error.*.id
}

View File

@ -1,3 +1,18 @@
output "conversion_errors_id" {
description = "id for monitor conversion_errors"
value = datadog_monitor.conversion_errors.*.id
}
output "failed_function_requests_id" {
description = "id for monitor failed_function_requests"
value = datadog_monitor.failed_function_requests.*.id
}
output "runtime_errors_id" {
description = "id for monitor runtime_errors"
value = datadog_monitor.runtime_errors.*.id
}
output "status_id" {
description = "id for monitor status"
value = datadog_monitor.status.*.id
@ -8,18 +23,3 @@ output "su_utilization_id" {
value = datadog_monitor.su_utilization.*.id
}
output "failed_function_requests_id" {
description = "id for monitor failed_function_requests"
value = datadog_monitor.failed_function_requests.*.id
}
output "conversion_errors_id" {
description = "id for monitor conversion_errors"
value = datadog_monitor.conversion_errors.*.id
}
output "runtime_errors_id" {
description = "id for monitor runtime_errors"
value = datadog_monitor.runtime_errors.*.id
}

View File

@ -1,8 +1,3 @@
output "virtualmachine_status_id" {
description = "id for monitor virtualmachine_status"
value = datadog_monitor.virtualmachine_status.*.id
}
output "virtualmachine_cpu_usage_id" {
description = "id for monitor virtualmachine_cpu_usage"
value = datadog_monitor.virtualmachine_cpu_usage.*.id
@ -13,18 +8,23 @@ output "virtualmachine_credit_cpu_remaining_too_low_id" {
value = datadog_monitor.virtualmachine_credit_cpu_remaining_too_low.*.id
}
output "virtualmachine_ram_reserved_id" {
description = "id for monitor virtualmachine_ram_reserved"
value = datadog_monitor.virtualmachine_ram_reserved.*.id
}
output "virtualmachine_disk_space_id" {
description = "id for monitor virtualmachine_disk_space"
value = datadog_monitor.virtualmachine_disk_space.*.id
}
output "virtualmachine_ram_reserved_id" {
description = "id for monitor virtualmachine_ram_reserved"
value = datadog_monitor.virtualmachine_ram_reserved.*.id
}
output "virtualmachine_requests_failed_id" {
description = "id for monitor virtualmachine_requests_failed"
value = datadog_monitor.virtualmachine_requests_failed.*.id
}
output "virtualmachine_status_id" {
description = "id for monitor virtualmachine_status"
value = datadog_monitor.virtualmachine_status.*.id
}

View File

@ -19,12 +19,12 @@ Creates DataDog monitors with the following checks:
- GCP Big Query Available Slots
- GCP Big Query Concurrent Queries
- GCP Big Query Execution Time
- GCP Big Query Scanned Bytes Billed
- GCP Big Query Scanned Bytes
- GCP Big Query Scanned Bytes Billed
- GCP Big Query Stored Bytes
- GCP Big Query Table Count
- GCP Big Query Uploaded Bytes Billed
- GCP Big Query Uploaded Bytes
- GCP Big Query Uploaded Bytes Billed
## Inputs

View File

@ -1,3 +1,8 @@
output "available_slots_id" {
description = "id for monitor available_slots"
value = datadog_monitor.available_slots.*.id
}
output "concurrent_queries_id" {
description = "id for monitor concurrent_queries"
value = datadog_monitor.concurrent_queries.*.id
@ -18,11 +23,6 @@ output "scanned_bytes_billed_id" {
value = datadog_monitor.scanned_bytes_billed.*.id
}
output "available_slots_id" {
description = "id for monitor available_slots"
value = datadog_monitor.available_slots.*.id
}
output "stored_bytes_id" {
description = "id for monitor stored_bytes"
value = datadog_monitor.stored_bytes.*.id

View File

@ -17,11 +17,11 @@ module "datadog-monitors-cloud-gcp-cloud-sql-common" {
Creates DataDog monitors with the following checks:
- Cloud SQL CPU Utilization
- Cloud SQL Disk Utilization forecast
- Cloud SQL Disk Utilization
- Cloud SQL Disk Utilization forecast
- Cloud SQL Failover Unavailable
- Cloud SQL Memory Utilization forecast
- Cloud SQL Memory Utilization
- Cloud SQL Memory Utilization forecast
## Inputs

View File

@ -13,6 +13,11 @@ output "disk_utilization_forecast_id" {
value = datadog_monitor.disk_utilization_forecast.*.id
}
output "failover_unavailable_id" {
description = "id for monitor failover_unavailable"
value = datadog_monitor.failover_unavailable.*.id
}
output "memory_utilization_id" {
description = "id for monitor memory_utilization"
value = datadog_monitor.memory_utilization.*.id
@ -23,8 +28,3 @@ output "memory_utilization_forecast_id" {
value = datadog_monitor.memory_utilization_forecast.*.id
}
output "failover_unavailable_id" {
description = "id for monitor failover_unavailable"
value = datadog_monitor.failover_unavailable.*.id
}

View File

@ -1,3 +1,13 @@
output "backend_latency_bucket_id" {
description = "id for monitor backend_latency_bucket"
value = datadog_monitor.backend_latency_bucket.*.id
}
output "backend_latency_service_id" {
description = "id for monitor backend_latency_service"
value = datadog_monitor.backend_latency_service.*.id
}
output "error_rate_4xx_id" {
description = "id for monitor error_rate_4xx"
value = datadog_monitor.error_rate_4xx.*.id
@ -8,16 +18,6 @@ output "error_rate_5xx_id" {
value = datadog_monitor.error_rate_5xx.*.id
}
output "backend_latency_service_id" {
description = "id for monitor backend_latency_service"
value = datadog_monitor.backend_latency_service.*.id
}
output "backend_latency_bucket_id" {
description = "id for monitor backend_latency_bucket"
value = datadog_monitor.backend_latency_bucket.*.id
}
output "request_count_id" {
description = "id for monitor request_count"
value = datadog_monitor.request_count.*.id

View File

@ -1,13 +1,3 @@
output "not_responding_id" {
description = "id for monitor not_responding"
value = datadog_monitor.not_responding.*.id
}
output "cluster_status_not_green_id" {
description = "id for monitor cluster_status_not_green"
value = datadog_monitor.cluster_status_not_green.*.id
}
output "cluster_initializing_shards_id" {
description = "id for monitor cluster_initializing_shards"
value = datadog_monitor.cluster_initializing_shards.*.id
@ -18,44 +8,29 @@ output "cluster_relocating_shards_id" {
value = datadog_monitor.cluster_relocating_shards.*.id
}
output "cluster_status_not_green_id" {
description = "id for monitor cluster_status_not_green"
value = datadog_monitor.cluster_status_not_green.*.id
}
output "cluster_unassigned_shards_id" {
description = "id for monitor cluster_unassigned_shards"
value = datadog_monitor.cluster_unassigned_shards.*.id
}
output "node_free_space_id" {
description = "id for monitor node_free_space"
value = datadog_monitor.node_free_space.*.id
output "fetch_change_id" {
description = "id for monitor fetch_change"
value = datadog_monitor.fetch_change.*.id
}
output "jvm_heap_memory_usage_id" {
description = "id for monitor jvm_heap_memory_usage"
value = datadog_monitor.jvm_heap_memory_usage.*.id
output "fetch_latency_id" {
description = "id for monitor fetch_latency"
value = datadog_monitor.fetch_latency.*.id
}
output "jvm_memory_young_usage_id" {
description = "id for monitor jvm_memory_young_usage"
value = datadog_monitor.jvm_memory_young_usage.*.id
}
output "jvm_memory_old_usage_id" {
description = "id for monitor jvm_memory_old_usage"
value = datadog_monitor.jvm_memory_old_usage.*.id
}
output "jvm_gc_old_collection_latency_id" {
description = "id for monitor jvm_gc_old_collection_latency"
value = datadog_monitor.jvm_gc_old_collection_latency.*.id
}
output "jvm_gc_young_collection_latency_id" {
description = "id for monitor jvm_gc_young_collection_latency"
value = datadog_monitor.jvm_gc_young_collection_latency.*.id
}
output "indexing_latency_id" {
description = "id for monitor indexing_latency"
value = datadog_monitor.indexing_latency.*.id
output "field_data_evictions_change_id" {
description = "id for monitor field_data_evictions_change"
value = datadog_monitor.field_data_evictions_change.*.id
}
output "flush_latency_id" {
@ -68,29 +43,44 @@ output "http_connections_anomaly_id" {
value = datadog_monitor.http_connections_anomaly.*.id
}
output "search_query_latency_id" {
description = "id for monitor search_query_latency"
value = datadog_monitor.search_query_latency.*.id
output "indexing_latency_id" {
description = "id for monitor indexing_latency"
value = datadog_monitor.indexing_latency.*.id
}
output "fetch_latency_id" {
description = "id for monitor fetch_latency"
value = datadog_monitor.fetch_latency.*.id
output "jvm_gc_old_collection_latency_id" {
description = "id for monitor jvm_gc_old_collection_latency"
value = datadog_monitor.jvm_gc_old_collection_latency.*.id
}
output "search_query_change_id" {
description = "id for monitor search_query_change"
value = datadog_monitor.search_query_change.*.id
output "jvm_gc_young_collection_latency_id" {
description = "id for monitor jvm_gc_young_collection_latency"
value = datadog_monitor.jvm_gc_young_collection_latency.*.id
}
output "fetch_change_id" {
description = "id for monitor fetch_change"
value = datadog_monitor.fetch_change.*.id
output "jvm_heap_memory_usage_id" {
description = "id for monitor jvm_heap_memory_usage"
value = datadog_monitor.jvm_heap_memory_usage.*.id
}
output "field_data_evictions_change_id" {
description = "id for monitor field_data_evictions_change"
value = datadog_monitor.field_data_evictions_change.*.id
output "jvm_memory_old_usage_id" {
description = "id for monitor jvm_memory_old_usage"
value = datadog_monitor.jvm_memory_old_usage.*.id
}
output "jvm_memory_young_usage_id" {
description = "id for monitor jvm_memory_young_usage"
value = datadog_monitor.jvm_memory_young_usage.*.id
}
output "node_free_space_id" {
description = "id for monitor node_free_space"
value = datadog_monitor.node_free_space.*.id
}
output "not_responding_id" {
description = "id for monitor not_responding"
value = datadog_monitor.not_responding.*.id
}
output "query_cache_evictions_change_id" {
@ -103,6 +93,16 @@ output "request_cache_evictions_change_id" {
value = datadog_monitor.request_cache_evictions_change.*.id
}
output "search_query_change_id" {
description = "id for monitor search_query_change"
value = datadog_monitor.search_query_change.*.id
}
output "search_query_latency_id" {
description = "id for monitor search_query_latency"
value = datadog_monitor.search_query_latency.*.id
}
output "task_time_in_queue_change_id" {
description = "id for monitor task_time_in_queue_change"
value = datadog_monitor.task_time_in_queue_change.*.id

View File

@ -3,6 +3,11 @@ output "mongodb_primary_id" {
value = datadog_monitor.mongodb_primary.*.id
}
output "mongodb_replication_id" {
description = "id for monitor mongodb_replication"
value = datadog_monitor.mongodb_replication.*.id
}
output "mongodb_secondary_id" {
description = "id for monitor mongodb_secondary"
value = datadog_monitor.mongodb_secondary.*.id
@ -13,8 +18,3 @@ output "mongodb_server_count_id" {
value = datadog_monitor.mongodb_server_count.*.id
}
output "mongodb_replication_id" {
description = "id for monitor mongodb_replication"
value = datadog_monitor.mongodb_replication.*.id
}

View File

@ -1,3 +1,8 @@
output "mysql_aborted_id" {
description = "id for monitor mysql_aborted"
value = datadog_monitor.mysql_aborted.*.id
}
output "mysql_availability_id" {
description = "id for monitor mysql_availability"
value = datadog_monitor.mysql_availability.*.id
@ -8,16 +13,6 @@ output "mysql_connection_id" {
value = datadog_monitor.mysql_connection.*.id
}
output "mysql_aborted_id" {
description = "id for monitor mysql_aborted"
value = datadog_monitor.mysql_aborted.*.id
}
output "mysql_slow_id" {
description = "id for monitor mysql_slow"
value = datadog_monitor.mysql_slow.*.id
}
output "mysql_pool_efficiency_id" {
description = "id for monitor mysql_pool_efficiency"
value = datadog_monitor.mysql_pool_efficiency.*.id
@ -28,13 +23,18 @@ output "mysql_pool_utilization_id" {
value = datadog_monitor.mysql_pool_utilization.*.id
}
output "mysql_threads_anomaly_id" {
description = "id for monitor mysql_threads_anomaly"
value = datadog_monitor.mysql_threads_anomaly.*.id
}
output "mysql_questions_anomaly_id" {
description = "id for monitor mysql_questions_anomaly"
value = datadog_monitor.mysql_questions_anomaly.*.id
}
output "mysql_slow_id" {
description = "id for monitor mysql_slow"
value = datadog_monitor.mysql_slow.*.id
}
output "mysql_threads_anomaly_id" {
description = "id for monitor mysql_threads_anomaly"
value = datadog_monitor.mysql_threads_anomaly.*.id
}

View File

@ -1,6 +1,6 @@
output "not_responding_id" {
description = "id for monitor not_responding"
value = datadog_monitor.not_responding.*.id
output "blocked_clients_id" {
description = "id for monitor blocked_clients"
value = datadog_monitor.blocked_clients.*.id
}
output "evicted_keys_id" {
@ -13,9 +13,9 @@ output "expirations_id" {
value = datadog_monitor.expirations.*.id
}
output "blocked_clients_id" {
description = "id for monitor blocked_clients"
value = datadog_monitor.blocked_clients.*.id
output "hitrate_id" {
description = "id for monitor hitrate"
value = datadog_monitor.hitrate.*.id
}
output "keyspace_full_id" {
@ -23,9 +23,9 @@ output "keyspace_full_id" {
value = datadog_monitor.keyspace_full.*.id
}
output "memory_used_id" {
description = "id for monitor memory_used"
value = datadog_monitor.memory_used.*.id
output "latency_id" {
description = "id for monitor latency"
value = datadog_monitor.latency.*.id
}
output "memory_frag_id" {
@ -33,18 +33,18 @@ output "memory_frag_id" {
value = datadog_monitor.memory_frag.*.id
}
output "memory_used_id" {
description = "id for monitor memory_used"
value = datadog_monitor.memory_used.*.id
}
output "not_responding_id" {
description = "id for monitor not_responding"
value = datadog_monitor.not_responding.*.id
}
output "rejected_connections_id" {
description = "id for monitor rejected_connections"
value = datadog_monitor.rejected_connections.*.id
}
output "latency_id" {
description = "id for monitor latency"
value = datadog_monitor.latency.*.id
}
output "hitrate_id" {
description = "id for monitor hitrate"
value = datadog_monitor.hitrate.*.id
}

View File

@ -1,10 +1,10 @@
output "datadog_nginx_process_id" {
description = "id for monitor datadog_nginx_process"
value = datadog_monitor.datadog_nginx_process.*.id
}
output "datadog_nginx_dropped_connections_id" {
description = "id for monitor datadog_nginx_dropped_connections"
value = datadog_monitor.datadog_nginx_dropped_connections.*.id
}
output "datadog_nginx_process_id" {
description = "id for monitor datadog_nginx_process"
value = datadog_monitor.datadog_nginx_process.*.id
}

View File

@ -1,8 +1,8 @@
#!/bin/bash
set -u
source "$(dirname $0)/utils.sh"
goto_root
init
echo "Check requirements"
function check_command() {
local cmd="$1"
@ -24,7 +24,9 @@ function check_version() {
if [[ "$1" == "terraform" ]]; then
tmp_dir=$(mktemp -d)
cd ${tmp_dir}
set +o pipefail # terraform fails on command piping when not last version
cur_ver=$(terraform version | head -n 1 | cut -d' ' -f2)
set -o pipefail
cur_ver=${cur_ver#"v"}
cd - > /dev/null
rm -fr ${tmp_dir}
@ -34,7 +36,7 @@ function check_version() {
req_ver="0.6.0"
cur_ver=$(terraform-docs --version)
else
return
return 0
fi
if ! verlte $req_ver $cur_ver; then
echo "This requires at least version ${req_ver} of $1, please upgrade (current version is ${cur_ver})"
@ -42,7 +44,8 @@ function check_version() {
fi
}
for cmd in terraform terraform-docs; do
for cmd in terraform terraform-docs terraform-config-inspect jq; do
echo -e "\t- Check command \"$cmd\" exists and in right version"
check_command $cmd
check_version $cmd
done

View File

@ -1,24 +0,0 @@
#!/bin/bash
set -xueo pipefail
source "$(dirname $0)/utils.sh"
goto_root
# loop over every monitors set
for path in $(find "$(get_scope $1)" -name 'monitors-*.tf' -print | sort -fdbi); do
cd $(dirname $path)
# empty outputs
> outputs.tf
# loop over monitors for each set
for monitor in $(grep 'resource "datadog_monitor"' $(basename $path) | awk '{print $3}' | tr -d '"' ); do
# create output block for current monitor
cat >> outputs.tf <<EOF
output "${monitor}_id" {
description = "id for monitor $monitor"
value = datadog_monitor.${monitor}.*.id
}
EOF
done
cd - >> /dev/null
done

View File

@ -1,128 +0,0 @@
#!/bin/bash
set -xueo pipefail
source "$(dirname $0)/utils.sh"
goto_root
# download awk script to hack terraform-docs
TERRAFORM_AWK="/tmp/terraform-docs.awk"
curl -Lo ${TERRAFORM_AWK} "https://raw.githubusercontent.com/cloudposse/build-harness/master/bin/terraform-docs.awk"
## root README generator
# only keep current README from begining to "Monitors summary" section (delete monitors list)
sed -i '/### Monitors summary ###/q' README.md
# add a newline after listing section
echo >> README.md
# loop over all ready monitors sets on the repo
for path in $(find -mindepth 1 -type d ! -path '*/.*' ! -path './scripts*' -print | sort -fdbi); do
# split path in directories
directories=($(list_dirs $path))
# loop over directories in path
for i in $(seq 1 $((${#directories[@]}-1))); do
## add tabulation for every subdirectory
echo -en "\t" >> README.md
done
# add link to list of monitors sets
echo -en "- [$(basename ${path})](https://git.fr.clara.net/claranet/pt-monitoring/projects/datadog/terraform/monitors/tree/master/" >> README.md
# add path to link
for directory in "${directories[@]}"; do
echo -en "${directory}/" >> README.md
done
# end of markdown link
echo ")" >> README.md
done
# this is the pattern from where custom information is saved to be restored
PATTERN_DOC="Related documentation"
# loop over every monitors set readme
for path in $(find "$(get_scope $1)" -name 'monitors-*.tf' -print | sort -fdbi); do
cd $(dirname $path)
EXIST=0
if [ -f README.md ]; then
mv README.md README.md.bak
EXIST=1
fi
# module name from path
module=$(list_dirs $(dirname ${path}))
# module name with space as separator
module_space=${module^^}
# module name with dash as separator
module_dash=${module//[ ]/-}
# module name with slash as separator
module_slash=${module//[ ]/\/}
# (re)generate README from scratch
cat <<EOF > README.md
# ${module_space} DataDog monitors
## How to use this module
\`\`\`
module "datadog-monitors-${module_dash}" {
source = "git::ssh://git@git.fr.clara.net/claranet/pt-monitoring/projects/datadog/terraform/monitors.git//${module_slash}?ref={revision}"
environment = var.environment
message = module.datadog-message-alerting.alerting-message
EOF
# if README already exist
if [[ $EXIST -eq 1 ]]; then
# take all custom config in declaration module example after "message" and until the end of block to restore it
sed -n '/^[[:space:]]*message[[:space:]]*=.*/,/^\}/p' README.md.bak | tail -n +2 | head -n -1 >> README.md
fi
# close block and generate the next until list of monitors
cat <<EOF >> README.md
}
\`\`\`
## Purpose
Creates DataDog monitors with the following checks:
EOF
SAVEIFS=$IFS
# allow looping over strings which contains spaces
IFS=$(echo -en "\n\b")
# loop over each monitor in the set
for match in $(grep -E ^[[:space:]]+name[[:space:]]+= $(basename ${path}) | sort -fdbi); do
## TODO rewrite this (and other things) using:
## terraform-config-inspect --json| jq -C
## awk '1;/^\}/{exit}' monitors-ingress.tf # with line numer of each resource
# parse monitor's name
name=$(get_name "${match}")
# search if monitor is enabled
[[ "$(grep -B1 "$name" $(basename ${path}) | grep enabled)" =~ ^[[:space:]]*count[[:space:]]*=[[:space:]]*var\.([a-z0-9_]*_enabled) ]] &&
# add "disabled by default" mention if not enabled
if ! grep -A4 "${BASH_REMATCH[1]}" inputs.tf | grep default.*true; then
name="${name} (disabled by default)"
fi
# monitor name element to the list and replace "could reach" pattern to "forecast" for better naming
echo "- ${name/could reach/forecast}" >> README.md
done
IFS=$SAVEIFS
echo >> README.md
# hack for terraform-docs with terraform 0.12 / HCL2 support
tmp_tf=$(mktemp -d)
awk -f ${TERRAFORM_AWK} ./*.tf > ${tmp_tf}/main.tf
# auto generate terraform docs (inputs and outputs)
terraform-docs --with-aggregate-type-defaults md table ${tmp_tf}/ >> README.md
rm -fr ${tmp_tf}
# if README does not exist
if [[ $EXIST -eq 0 ]]; then
# Simply add empty documentation section
cat <<EOF >> README.md
## ${PATTERN_DOC}
EOF
else
# else restore the custom information saved before
grep -Pzo --color=never ".*${PATTERN_DOC}(.*\n)*" README.md.bak | head -n -1 >> README.md
rm README.md.bak
fi
# force unix format (I don't know why for now but you never know)
dos2unix README.md
cd - >> /dev/null
done

27
scripts/10_update_output.sh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/bash
# Regenerate outputs.tf for every monitors module: one "<monitor>_id" output
# per datadog_monitor resource found by terraform-config-inspect.
# Usage: 10_update_output.sh [path] — the optional path restricts the scope.
# NOTE(review): init, browse_modules and get_scope come from utils.sh, which
# is not visible here; init presumably sets shell options and cds to the
# repository root — confirm against utils.sh.
source "$(dirname $0)/utils.sh"

init

echo "Generate terraform outputs.tf files for every monitors modules"

# loop over every modules
for module in $(browse_modules "$(get_scope ${1:-})" 'monitors-*.tf'); do
echo -e "\t- Generate outputs.tf for module: ${module}"
cd ${module}
# empty outputs (truncate before regenerating so stale outputs disappear)
> outputs.tf
# gather a information line splitted with "|" for every monitor
# (only .name is emitted today, so the second field read below stays empty)
for row in $(terraform-config-inspect --json | jq -c -r '.managed_resources | map([.name] | join("|")) | join("\n")'); do
# split line for each info one variable
IFS='|' read monitor type < <(echo $row)
# create output block for current monitor; heredoc body is the exact
# Terraform text written to outputs.tf
cat >> outputs.tf <<EOF
output "${monitor}_id" {
description = "id for monitor ${monitor}"
value = datadog_monitor.${monitor}.*.id
}
EOF
done
cd - >> /dev/null
done

View File

@ -0,0 +1,28 @@
#!/bin/bash
# Rebuild the "Monitors summary" tree at the end of the repository root
# README.md: a nested markdown list linking every module directory.
# NOTE(review): init and list_dirs come from utils.sh (not visible here);
# init presumably cds to the repository root, which the relative 'find'
# below depends on — confirm. 'sed -i' and bare 'find' (no path argument)
# assume GNU tools.
source "$(dirname $0)/utils.sh"

init

echo "Update global README.md"

# only keep current README from beginning to "Monitors summary" section (delete monitors list)
sed -i '/### Monitors summary ###/q' README.md

# add a newline after listing section
echo >> README.md

# loop over path of modules tree (skip hidden dirs and the scripts dir)
for path in $(find -mindepth 1 -type d ! -path '*/.*' ! -path './scripts*' -print | sort -fdbi); do
# split path in directories
directories=($(list_dirs $path))
# loop over directories in path
for i in $(seq 1 $((${#directories[@]}-1))); do
## add tabulation for every subdirectory (one level of nesting per depth)
echo -en "\t" >> README.md
done
# add link to list of monitors sets
echo -en "- [$(basename ${path})](https://git.fr.clara.net/claranet/pt-monitoring/projects/datadog/terraform/monitors/tree/master/" >> README.md
# add path to link
for directory in "${directories[@]}"; do
echo -en "${directory}/" >> README.md
done
# end of markdown link
echo ")" >> README.md
done

View File

@ -0,0 +1,109 @@
#!/bin/bash
# Regenerate README.md for every monitors module: usage snippet, sorted list
# of monitors (with "disabled by default" mentions), terraform-docs tables,
# and the preserved custom "Related documentation" section.
# Usage: script [path] — the optional path restricts the scope.
# NOTE(review): init, browse_modules, get_scope, list_dirs and get_name come
# from utils.sh, which is not visible here — behavior assumed from call sites.
source "$(dirname $0)/utils.sh"

init

echo "Update README.md for every monitors modules"

# download awk script to hack terraform-docs (HCL2 is not supported natively)
TERRAFORM_AWK="/tmp/terraform-docs.awk"
curl -Lso ${TERRAFORM_AWK} "https://raw.githubusercontent.com/cloudposse/build-harness/master/bin/terraform-docs.awk"

# this is the pattern from where custom information is saved to be restored
PATTERN_DOC="Related documentation"

# loop over every modules
for module in $(browse_modules "$(get_scope ${1:-})" 'monitors-*.tf'); do
echo -e "\t- Generate README.md for module: ${module}"
cd ${module}
EXIST=0
if [ -f README.md ]; then
# keep a backup so the custom documentation section can be restored below
mv README.md README.md.bak
EXIST=1
fi
# module name from path, with space as separator
module_space=$(list_dirs ${module})
# module name in upper case (README title)
module_upper=${module_space^^}
# module name with dash as separator (module label)
module_dash=${module_space//[ ]/-}
# module name with slash as separator (module source sub-path)
module_slash=${module_space//[ ]/\/}
# (re)generate README from scratch
cat <<EOF > README.md
# ${module_upper} DataDog monitors

## How to use this module

\`\`\`
module "datadog-monitors-${module_dash}" {
source = "git::ssh://git@git.fr.clara.net/claranet/pt-monitoring/projects/datadog/terraform/monitors.git//${module_slash}?ref={revision}"

environment = var.environment
message = module.datadog-message-alerting.alerting-message
EOF
# if README already exist
if [[ $EXIST -eq 1 ]]; then
# take all custom config in declaration module example after "message" and until the end of block to restore it
sed -n '/^[[:space:]]*message[[:space:]]*=.*/,/^\}/p' README.md.bak | tail -n +2 | head -n -1 >> README.md
fi
# close block and generate the next until list of monitors
cat <<EOF >> README.md
}
\`\`\`

## Purpose

Creates DataDog monitors with the following checks:

EOF
list=""
# gather a "filename|line" information line for every monitor resource
for row in $(terraform-config-inspect --json | jq -c -r '.managed_resources | map([.pos.filename, .pos.line] | join("|")) | join("\n")' | sort -fdbi); do
# split line for each info one variable
IFS='|' read filename line < <(echo $row)
# gather all config HCL code for current monitor (from its first line to
# the closing brace); terraform-config-inspect guarantees filename/line
set +o pipefail
config=$(tail -n +${line} "${filename}" | sed '/^}/q')
set -o pipefail
# parse monitor's name
name=$(get_name "$(echo "${config}" | grep 'name[[:space:]]*=')")
# search if monitor is enabled
[[ "$(echo "${config}" | grep 'count[[:space:]]*=')" =~ ^[[:space:]]*count[[:space:]]*=[[:space:]]*var\.([a-z0-9_]*_enabled) ]] &&
# add "disabled by default" mention if not enabled
if ! grep -A4 "${BASH_REMATCH[1]}" inputs.tf | grep -q default.*true; then
name="${name} (disabled by default)"
fi
# append new line to list if not empty
if ! [ -z "${list}" ]; then
list="${list}\n"
fi
# append name to list and improve forecast naming
list="${list}- ${name/could reach/forecast}"
done
# write sorted list to readme appending newline to end
echo -e "$(echo -e "${list}" | sort -fdbi)\n" >> README.md
# hack for terraform-docs with terraform 0.12 / HCL2 support
tmp_tf=$(mktemp -d)
awk -f ${TERRAFORM_AWK} ./*.tf > ${tmp_tf}/main.tf
# auto generate terraform docs (inputs and outputs)
terraform-docs --with-aggregate-type-defaults md table ${tmp_tf}/ >> README.md
rm -fr ${tmp_tf}
# if README does not exist
if [[ $EXIST -eq 0 ]]; then
# Simply add empty documentation section
cat <<EOF >> README.md
## ${PATTERN_DOC}

EOF
else
# else restore the custom information saved before
grep -Pzo --color=never ".*${PATTERN_DOC}(.*\n)*" README.md.bak | head -n -1 >> README.md
rm -f README.md.bak
fi
# force unix format (I don't know why for now but you never know)
dos2unix README.md 2> /dev/null
cd - >> /dev/null
done

View File

@ -1,18 +1,18 @@
#!/bin/bash
set -xueo pipefail
source "$(dirname $0)/utils.sh"
goto_root
init
echo "Generate outputs.tf files when does not exist for every monitors modules"
root=$(basename ${PWD})
# loop over every monitors set
for path in $(find "$(get_scope $1)" -name 'monitors-*.tf' -print | sort -fdbi); do
cd $(dirname $path)
# loop over every modules
for module in $(browse_modules "$(get_scope ${1:-})" 'monitors-*.tf'); do
cd ${module}
# get name of the monitors set directory
resource="$(basename $(dirname $path))"
resource="$(basename ${module})"
# if modules.tf does not exist AND if this set respect our tagging convention
if ! [ -f modules.tf ] && grep -q filter_tags_use_defaults inputs.tf; then
echo -e "\t- Generate modules.tf for module: ${module}"
relative=""
current="${PWD}"
# iterate on path until we go back to root
@ -27,11 +27,13 @@ for path in $(find "$(get_scope $1)" -name 'monitors-*.tf' -print | sort -fdbi);
module "filter-tags" {
source = "${relative}common/filter-tags"
environment = var.environment
resource = "$resource"
filter_tags_use_defaults = var.filter_tags_use_defaults
filter_tags_custom = var.filter_tags_custom
environment = var.environment
resource = "$resource"
filter_tags_use_defaults = var.filter_tags_use_defaults
filter_tags_custom = var.filter_tags_custom
filter_tags_custom_excluded = var.filter_tags_custom_excluded
}
EOF
fi
cd - >> /dev/null

View File

@ -1,14 +1,15 @@
#!/bin/bash
set -xueo pipefail
source "$(dirname $0)/utils.sh"
goto_root
init
echo "Check best practices respect"
# loop over every monitors set
for path in $(find "$(get_scope $1)" -name 'monitors-*.tf' -print | sort -fdbi); do
echo -e "\t- Check only one notify_no_data set to true per module"
# loop over every modules
for module in $(browse_modules "$(get_scope ${1:-})" 'monitors-*.tf'); do
# check if there is more than 1 notify_no_data parameter set to true per set of monitors
if [[ $(grep -c notify_no_data.*true $path) -gt 1 ]]; then
echo "More than one notify_no_data set to true on $path"
if [[ $(cat ${module}/monitors-*.tf | grep -c notify_no_data.*true) -gt 1 ]]; then
echo "More than one notify_no_data set to true on $module"
exit 1
fi
done

View File

@ -1,14 +1,21 @@
#!/bin/bash
set -xueo pipefail
source "$(dirname $0)/utils.sh"
goto_root
init
echo "Check terraform CI"
# Clean when exit
err() {
rm -f "${module}/tmp.tf"
}
trap 'err $LINENO' ERR TERM EXIT INT
provider_version=$(grep ^[[:space:]]*version[[:space:]]= README.md | awk '{print $3}')
for path in $(find "$(get_scope $1)" -name 'inputs.tf' -print); do
dir=$(dirname ${path})
cat <<EOF > ${dir}/tmp.tf
# loop over every modules
for module in $(browse_modules "$(get_scope ${1:-})" 'inputs.tf'); do
echo -e "\t- Terraform validate on module: ${module}"
cat <<EOF > ${module}/tmp.tf
provider "datadog" {
version = $provider_version
@ -27,13 +34,14 @@ variable "datadog_app_key" {
}
EOF
if [ -f ${dir}/test.tf.ci ]; then
cat ${dir}/test.tf.ci >> ${dir}/tmp.tf
if [ -f ${module}/test.tf.ci ]; then
cat ${module}/test.tf.ci >> ${module}/tmp.tf
fi
terraform init ${dir}
terraform validate ${dir}
rm -f ${dir}/tmp.tf
terraform init ${module} > /tmp/null
terraform validate ${module}
rm -f ${module}/tmp.tf
done
echo -e "\t- Terraform fmt recursive"
terraform fmt -recursive

54
scripts/README.md Normal file
View File

@ -0,0 +1,54 @@
# Datadog scripts
## Summary
This repository contains a `scripts` directory with multiple scripts that help with different things:
- help and automate for some boring and repetitive tasks.
- keep everything up to date and warn if you forget.
- compliance checks to ensure best practices are respected.
- code validation for continuous integration.
## Structure
There are two kinds of scripts naming:
- `[0-9][0-9]_script_name.sh`: will be automatically run by `auto_update.sh` wrapper.
- `script_name.sh`: should be run manually.
Here is a list of scripts and their purpose:
- `auto_update.sh`: is the most important one and the one you must use. It is a simple wrapper which calls every other `[0-9][0-9]*` script.
- It should be run by contributor after every change.
- The CI will also run it and it will fail if it detects any change compared to commit.
- "Children" scripts could be run individually if you know exactly what you need to update after a change.
- This script and all "children" scripts take one optional parameter to limit execution to a specific sub path. Otherwise they will run on all directories.
- `00_requirements.sh`: check some requirements like `terraform` command exists before run other scripts.
- `10_update_output.sh`: will generate and update all `outputs.tf`.
- `20_update_global_readme.sh`: will update the main `README.md` file and generate the list of all modules browsing the repository.
- `20_update_modules_readmes.sh`: will create and update `README.md` for each module. It will save all manual changes below `## Related documentation` section.
- `30_update_module.sh`: will create a `modules.tf` file per module when it does not exist.
- `90_best_practices.sh`: will check compliance and best practices respect.
- `99_terraform.sh`: terraform CI (init & validate only while auto apply is done in another pipeline).
- `utils.sh`: contains useful functions common to multiple scripts. It is not intended to be run directly.
- `changelog.sh`: helper script to release a new version.
- generate and update `CHANGELOG.md` file from git history.
- filter to list only "done" issues from JIRA.
- close all issues on JIRA.
- fix version for all issues on JIRA.
- create release for current version on JIRA.
## Usage
After any change on this repo, you need to run the `./scripts/auto_update.sh [PATH_TO_MODULE]` command to make sure all is up to date otherwise the CI pipeline will fail:
The parameter is optional and it will limit the scripts execution on a specific path on the repository
On linux system it is possible to run the script directly while `terraform`, `terraform-docs`, `terraform-config-inspect`, `jq` commands are available in your PATH.
Otherwise you can use [the same docker image as the CI](https://hub.docker.com/r/claranet/datadog-terraform) on any other platform.
With this command run from the root of the repository you will get exactly the same execution as the pipeline (and so the same result also):
```
$ docker run --rm -v "$PWD:/work" claranet/datadog-terraform /work/scripts/auto_update.sh
```

View File

@ -1,13 +1,8 @@
#!/bin/bash
set -xueo pipefail
# MON-478 fix sort order behavior on case
export LC_COLLATE=C
source "$(dirname $0)/utils.sh"
cd $(dirname $0)
init scripts
for script in [0-9][0-9]_*.sh; do
./${script} "$(get_scope $1)"
./${script} "$(get_scope ${1:-})"
done

View File

@ -1,6 +1,6 @@
#!/bin/bash
function goto_root {
function goto_root() {
script_dir=$(dirname $0)
if [[ "$script_dir" == "." ]]; then
cd ..
@ -9,19 +9,35 @@ function goto_root {
fi
}
function get_scope {
TO_PARSE="."
if [ ! -z ${1+x} ]; then
# Common initialisation for every script: enable strict mode, make sort
# order deterministic across locales, move to the repository root and
# optionally restrict execution to a sub path.
# Arguments: $1 - optional sub path to limit the script scope (may be empty)
function init() {
# Strict mode: abort on error, on unset variable and on failed pipeline stage.
set -euo pipefail
# Trace executed commands only in CI so local runs stay readable.
if [[ ${GITLAB_CI:-} == "true" ]]; then
set -x
fi
# MON-478 fix sort order behavior on case
export LC_COLLATE=C
# goto_root is defined above in this file; it changes cwd to the repo root.
goto_root
# When a non-empty scope argument is given, run from that sub directory.
if ! [ -z ${1:-} ]; then
cd "$1"
fi
}
# Compute the scope path the scripts should operate on.
# Arguments: $1 - optional sub path; unset, empty or "." means the whole repo
# Outputs:   the scope path, always prefixed with "./", on stdout
function get_scope() {
local to_parse="./"
# Use the argument as scope only when it is set, non-empty and not ".".
# Quoting "$1" is required: the original unquoted test aborted with
# "unary operator expected" when called with an empty string argument.
if [ -n "${1:-}" ] && [ "$1" != "." ]; then
to_parse="$1"
fi
# Normalise to a relative "./" prefix so find output stays consistent.
if [[ "$to_parse" != ./* ]]; then
to_parse="./${to_parse}"
fi
echo "$to_parse"
}
function list_dirs {
# Print the components of a path after the first one, space separated.
# e.g. "a/b/c" -> "b c"; a path without "/" yields an empty line.
# Arguments: $1 - the path to split
function list_dirs() {
local path="${1}"
case "$path" in
*/*)
# Drop everything up to and including the first "/", then turn the
# remaining separators into spaces.
local tail="${path#*/}"
echo "${tail//\// }"
;;
*)
# No separator: nothing remains after the first component.
echo ""
;;
esac
}
function get_name {
function get_name() {
regex='^[[:space:]]+name[[:space:]]+=[[:space:]]+"\$.*\[.*\][[:space:]]+(.*)"$'
if [[ "${1}" =~ ${regex} ]]; then
name="${BASH_REMATCH[1]}"
@ -35,3 +51,7 @@ function get_name {
echo $name
return 0
}
# Print the sorted, de-duplicated list of directories containing at least
# one file whose name matches the given pattern.
# Arguments: $1 - root path to search, $2 - filename glob pattern
function browse_modules() {
local root="$1"
local pattern="$2"
find "$root" -name "$pattern" -print \
| while IFS= read -r match; do dirname "$match"; done \
| sort -fdbiu
}

View File

@ -21,8 +21,8 @@ Creates DataDog monitors with the following checks:
- CPU load 5 ratio
- CPU usage
- Disk inodes usage
- Disk Space usage forecast
- Disk space usage
- Disk Space usage forecast
- Usable Memory
## Inputs

View File

@ -3,9 +3,9 @@ output "cpu_id" {
value = datadog_monitor.cpu.*.id
}
output "load_id" {
description = "id for monitor load"
value = datadog_monitor.load.*.id
output "disk_inodes_id" {
description = "id for monitor disk_inodes"
value = datadog_monitor.disk_inodes.*.id
}
output "disk_space_id" {
@ -18,9 +18,9 @@ output "disk_space_forecast_id" {
value = datadog_monitor.disk_space_forecast.*.id
}
output "disk_inodes_id" {
description = "id for monitor disk_inodes"
value = datadog_monitor.disk_inodes.*.id
output "load_id" {
description = "id for monitor load"
value = datadog_monitor.load.*.id
}
output "memory_id" {