Merge branch 'MON-236_add_prefix_to_name' into 'master'

Resolve MON-236 "Add prefix to name"

Closes MON-236

See merge request claranet/pt-monitoring/projects/datadog/terraform/monitors!51
This commit is contained in:
Quentin Manfroi 2019-04-24 11:37:26 +02:00
commit 427798b91a
163 changed files with 553 additions and 229 deletions

View File

@ -35,6 +35,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -33,6 +33,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "ark_schedules_monitor_message" { variable "ark_schedules_monitor_message" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "ark_schedules_monitor" { resource "datadog_monitor" "ark_schedules_monitor" {
count = "${var.ark_schedules_enabled == "true" ? 1 : 0}" count = "${var.ark_schedules_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Ark backup failed" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Ark backup failed"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.ark_schedules_monitor_message, var.message)}" message = "${coalesce(var.ark_schedules_monitor_message, var.message)}"

View File

@ -34,6 +34,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -33,6 +33,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "apiserver_silenced" { variable "apiserver_silenced" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "apiserver" { resource "datadog_monitor" "apiserver" {
count = "${var.apiserver_enabled == "true" ? 1 : 0}" count = "${var.apiserver_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes API server does not respond" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes API server does not respond"
message = "${coalesce(var.apiserver_message, var.message)}" message = "${coalesce(var.apiserver_message, var.message)}"
type = "service check" type = "service check"

View File

@ -47,6 +47,7 @@ Creates DataDog monitors with the following checks:
| ingress\_5xx\_timeframe | Monitor timeframe for Ingress 5xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | ingress\_5xx\_timeframe | Monitor timeframe for Ingress 5xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "nginx_ingress_too_many_5xx" { resource "datadog_monitor" "nginx_ingress_too_many_5xx" {
count = "${var.ingress_5xx_enabled == "true" ? 1 : 0}" count = "${var.ingress_5xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Nginx Ingress 5xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Nginx Ingress 5xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.ingress_5xx_message, var.message)}" message = "${coalesce(var.ingress_5xx_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -34,7 +34,7 @@ resource "datadog_monitor" "nginx_ingress_too_many_5xx" {
resource "datadog_monitor" "nginx_ingress_too_many_4xx" { resource "datadog_monitor" "nginx_ingress_too_many_4xx" {
count = "${var.ingress_4xx_enabled == "true" ? 1 : 0}" count = "${var.ingress_4xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Nginx Ingress 4xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Nginx Ingress 4xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.ingress_4xx_message, var.message)}" message = "${coalesce(var.ingress_4xx_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -67,6 +67,7 @@ Creates DataDog monitors with the following checks:
| node\_unschedulable\_silenced | Groups to mute for node unschedulable monitor | map | `{}` | no | | node\_unschedulable\_silenced | Groups to mute for node unschedulable monitor | map | `{}` | no |
| node\_unschedulable\_time\_aggregator | Monitor aggregator for node unschedulable [available values: min, max or avg] | string | `"min"` | no | | node\_unschedulable\_time\_aggregator | Monitor aggregator for node unschedulable [available values: min, max or avg] | string | `"min"` | no |
| node\_unschedulable\_timeframe | Monitor timeframe for node unschedulable [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_1h"` | no | | node\_unschedulable\_timeframe | Monitor timeframe for node unschedulable [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_1h"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
| ready\_enabled | Flag to enable Node ready monitor | string | `"true"` | no | | ready\_enabled | Flag to enable Node ready monitor | string | `"true"` | no |
| ready\_extra\_tags | Extra tags for Node ready monitor | list | `[]` | no | | ready\_extra\_tags | Extra tags for Node ready monitor | list | `[]` | no |
| ready\_message | Custom message for Node ready monitor | string | `""` | no | | ready\_message | Custom message for Node ready monitor | string | `""` | no |

View File

@ -33,6 +33,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "disk_pressure_silenced" { variable "disk_pressure_silenced" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "disk_pressure" { resource "datadog_monitor" "disk_pressure" {
count = "${var.disk_pressure_enabled == "true" ? 1 : 0}" count = "${var.disk_pressure_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Disk pressure" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Disk pressure"
message = "${coalesce(var.disk_pressure_message, var.message)}" message = "${coalesce(var.disk_pressure_message, var.message)}"
type = "service check" type = "service check"
@ -30,7 +30,7 @@ resource "datadog_monitor" "disk_pressure" {
resource "datadog_monitor" "disk_out" { resource "datadog_monitor" "disk_out" {
count = "${var.disk_out_enabled == "true" ? 1 : 0}" count = "${var.disk_out_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Out of disk" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Out of disk"
message = "${coalesce(var.disk_out_message, var.message)}" message = "${coalesce(var.disk_out_message, var.message)}"
type = "service check" type = "service check"
@ -60,7 +60,7 @@ resource "datadog_monitor" "disk_out" {
resource "datadog_monitor" "memory_pressure" { resource "datadog_monitor" "memory_pressure" {
count = "${var.memory_pressure_enabled == "true" ? 1 : 0}" count = "${var.memory_pressure_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Memory pressure" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Memory pressure"
message = "${coalesce(var.memory_pressure_message, var.message)}" message = "${coalesce(var.memory_pressure_message, var.message)}"
type = "service check" type = "service check"
@ -90,7 +90,7 @@ resource "datadog_monitor" "memory_pressure" {
resource "datadog_monitor" "ready" { resource "datadog_monitor" "ready" {
count = "${var.ready_enabled == "true" ? 1 : 0}" count = "${var.ready_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node not ready" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node not ready"
message = "${coalesce(var.ready_message, var.message)}" message = "${coalesce(var.ready_message, var.message)}"
type = "service check" type = "service check"
@ -120,7 +120,7 @@ resource "datadog_monitor" "ready" {
resource "datadog_monitor" "kubelet_ping" { resource "datadog_monitor" "kubelet_ping" {
count = "${var.kubelet_ping_enabled == "true" ? 1 : 0}" count = "${var.kubelet_ping_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Kubelet API does not respond" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Kubelet API does not respond"
message = "${coalesce(var.kubelet_ping_message, var.message)}" message = "${coalesce(var.kubelet_ping_message, var.message)}"
type = "service check" type = "service check"
@ -150,7 +150,7 @@ resource "datadog_monitor" "kubelet_ping" {
resource "datadog_monitor" "kubelet_syncloop" { resource "datadog_monitor" "kubelet_syncloop" {
count = "${var.kubelet_syncloop_enabled == "true" ? 1 : 0}" count = "${var.kubelet_syncloop_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Kubelet sync loop that updates containers does not work" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Kubelet sync loop that updates containers does not work"
message = "${coalesce(var.kubelet_syncloop_message, var.message)}" message = "${coalesce(var.kubelet_syncloop_message, var.message)}"
type = "service check" type = "service check"
@ -180,7 +180,7 @@ resource "datadog_monitor" "kubelet_syncloop" {
resource "datadog_monitor" "unregister_net_device" { resource "datadog_monitor" "unregister_net_device" {
count = "${var.unregister_net_device_enabled == "true" ? 1 : 0}" count = "${var.unregister_net_device_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node Frequent unregister net device" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node Frequent unregister net device"
type = "event alert" type = "event alert"
message = "${coalesce(var.unregister_net_device_message, var.message)}" message = "${coalesce(var.unregister_net_device_message, var.message)}"
@ -203,7 +203,7 @@ resource "datadog_monitor" "unregister_net_device" {
resource "datadog_monitor" "node_unschedulable" { resource "datadog_monitor" "node_unschedulable" {
count = "${var.node_unschedulable_enabled == "true" ? 1 : 0}" count = "${var.node_unschedulable_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Node unschedulable" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node unschedulable"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.node_unschedulable_message, var.message)}" message = "${coalesce(var.node_unschedulable_message, var.message)}"

View File

@ -53,6 +53,7 @@ Creates DataDog monitors with the following checks:
| pod\_phase\_status\_silenced | Groups to mute for Pod phase status monitor | map | `{}` | no | | pod\_phase\_status\_silenced | Groups to mute for Pod phase status monitor | map | `{}` | no |
| pod\_phase\_status\_time\_aggregator | Monitor aggregator for Pod phase status [available values: min, max or avg] | string | `"max"` | no | | pod\_phase\_status\_time\_aggregator | Monitor aggregator for Pod phase status [available values: min, max or avg] | string | `"max"` | no |
| pod\_phase\_status\_timeframe | Monitor timeframe for Pod phase status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | pod\_phase\_status\_timeframe | Monitor timeframe for Pod phase status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -33,6 +33,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "pod_phase_status_silenced" { variable "pod_phase_status_silenced" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "pod_phase_status" { resource "datadog_monitor" "pod_phase_status" {
count = "${var.pod_phase_status_enabled == "true" ? 1 : 0}" count = "${var.pod_phase_status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Pod phase status failed" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Pod phase status failed"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.pod_phase_status_message, var.message)}" message = "${coalesce(var.pod_phase_status_message, var.message)}"
@ -30,7 +30,7 @@ resource "datadog_monitor" "pod_phase_status" {
resource "datadog_monitor" "error" { resource "datadog_monitor" "error" {
count = "${var.error_enabled == "true" ? 1 : 0}" count = "${var.error_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Pod errors {{#is_alert}}{{{comparator}}} {{threshold}} times ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} times ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Pod errors {{#is_alert}}{{{comparator}}} {{threshold}} times ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} times ({{value}}){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.error_message, var.message)}" message = "${coalesce(var.error_message, var.message)}"
@ -62,7 +62,7 @@ resource "datadog_monitor" "error" {
resource "datadog_monitor" "crashloopbackoff" { resource "datadog_monitor" "crashloopbackoff" {
count = "${var.crashloopbackoff_enabled == "true" ? 1 : 0}" count = "${var.crashloopbackoff_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Pod CrashLoopBackOff" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Pod CrashLoopBackOff"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.crashloopbackoff_message, var.message)}" message = "${coalesce(var.crashloopbackoff_message, var.message)}"

View File

@ -43,6 +43,7 @@ Creates DataDog monitors with the following checks:
| job\_threshold\_warning | Job monitor (warning threshold) | string | `"3"` | no | | job\_threshold\_warning | Job monitor (warning threshold) | string | `"3"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
| replica\_available\_enabled | Flag to enable Available replica monitor | string | `"true"` | no | | replica\_available\_enabled | Flag to enable Available replica monitor | string | `"true"` | no |
| replica\_available\_extra\_tags | Extra tags for Available replica monitor | list | `[]` | no | | replica\_available\_extra\_tags | Extra tags for Available replica monitor | list | `[]` | no |
| replica\_available\_message | Custom message for Available replica monitor | string | `""` | no | | replica\_available\_message | Custom message for Available replica monitor | string | `""` | no |

View File

@ -33,6 +33,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "job_silenced" { variable "job_silenced" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "job" { resource "datadog_monitor" "job" {
count = "${var.job_enabled == "true" ? 1 : 0}" count = "${var.job_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes job failed" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes job failed"
message = "${coalesce(var.job_message, var.message)}" message = "${coalesce(var.job_message, var.message)}"
type = "service check" type = "service check"
@ -30,7 +30,7 @@ resource "datadog_monitor" "job" {
resource "datadog_monitor" "cronjob" { resource "datadog_monitor" "cronjob" {
count = "${var.cronjob_enabled == "true" ? 1 : 0}" count = "${var.cronjob_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes cronjob scheduling failed" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes cronjob scheduling failed"
message = "${coalesce(var.cronjob_message, var.message)}" message = "${coalesce(var.cronjob_message, var.message)}"
type = "service check" type = "service check"
@ -60,7 +60,7 @@ resource "datadog_monitor" "cronjob" {
resource "datadog_monitor" "replica_available" { resource "datadog_monitor" "replica_available" {
count = "${var.replica_available_enabled == "true" ? 1 : 0}" count = "${var.replica_available_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Available replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Available replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.replica_available_message, var.message)}" message = "${coalesce(var.replica_available_message, var.message)}"
@ -92,7 +92,7 @@ resource "datadog_monitor" "replica_available" {
resource "datadog_monitor" "replica_ready" { resource "datadog_monitor" "replica_ready" {
count = "${var.replica_ready_enabled == "true" ? 1 : 0}" count = "${var.replica_ready_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Ready replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Ready replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.replica_ready_message, var.message)}" message = "${coalesce(var.replica_ready_message, var.message)}"
@ -124,7 +124,7 @@ resource "datadog_monitor" "replica_ready" {
resource "datadog_monitor" "replica_current" { resource "datadog_monitor" "replica_current" {
count = "${var.replica_current_enabled == "true" ? 1 : 0}" count = "${var.replica_current_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kubernetes Current replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Current replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.replica_current_message, var.message)}" message = "${coalesce(var.replica_current_message, var.message)}"

View File

@ -81,6 +81,7 @@ Creates DataDog monitors with the following checks:
| latency\_timeframe | Monitor timeframe for ALB latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | latency\_timeframe | Monitor timeframe for ALB latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -34,6 +34,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
# Datadog monitors variables # Datadog monitors variables
variable "alb_no_healthy_instances_silenced" { variable "alb_no_healthy_instances_silenced" {

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "ALB_no_healthy_instances" { resource "datadog_monitor" "ALB_no_healthy_instances" {
count = "${var.alb_no_healthy_instances_enabled == "true" ? 1 : 0}" count = "${var.alb_no_healthy_instances_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB healthy instances {{#is_alert}}is at 0{{/is_alert}}{{#is_warning}}is at {{value}}%{{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB healthy instances {{#is_alert}}is at 0{{/is_alert}}{{#is_warning}}is at {{value}}%{{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.alb_no_healthy_instances_message, var.message)}" message = "${coalesce(var.alb_no_healthy_instances_message, var.message)}"
@ -33,7 +33,7 @@ resource "datadog_monitor" "ALB_no_healthy_instances" {
resource "datadog_monitor" "ALB_latency" { resource "datadog_monitor" "ALB_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}" count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB latency {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB latency {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.latency_message, var.message)}" message = "${coalesce(var.latency_message, var.message)}"
@ -64,7 +64,7 @@ resource "datadog_monitor" "ALB_latency" {
resource "datadog_monitor" "ALB_httpcode_5xx" { resource "datadog_monitor" "ALB_httpcode_5xx" {
count = "${var.httpcode_alb_5xx_enabled == "true" ? 1 : 0}" count = "${var.httpcode_alb_5xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.httpcode_alb_5xx_message, var.message)}" message = "${coalesce(var.httpcode_alb_5xx_message, var.message)}"
@ -96,7 +96,7 @@ resource "datadog_monitor" "ALB_httpcode_5xx" {
resource "datadog_monitor" "ALB_httpcode_4xx" { resource "datadog_monitor" "ALB_httpcode_4xx" {
count = "${var.httpcode_alb_4xx_enabled == "true" ? 1 : 0}" count = "${var.httpcode_alb_4xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.httpcode_alb_4xx_message, var.message)}" message = "${coalesce(var.httpcode_alb_4xx_message, var.message)}"
@ -128,7 +128,7 @@ resource "datadog_monitor" "ALB_httpcode_4xx" {
resource "datadog_monitor" "ALB_httpcode_target_5xx" { resource "datadog_monitor" "ALB_httpcode_target_5xx" {
count = "${var.httpcode_target_5xx_enabled == "true" ? 1 : 0}" count = "${var.httpcode_target_5xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB target HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB target HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.httpcode_target_5xx_message, var.message)}" message = "${coalesce(var.httpcode_target_5xx_message, var.message)}"
@ -160,7 +160,7 @@ resource "datadog_monitor" "ALB_httpcode_target_5xx" {
resource "datadog_monitor" "ALB_httpcode_target_4xx" { resource "datadog_monitor" "ALB_httpcode_target_4xx" {
count = "${var.httpcode_target_4xx_enabled == "true" ? 1 : 0}" count = "${var.httpcode_target_4xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ALB target HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB target HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.httpcode_target_4xx_message, var.message)}" message = "${coalesce(var.httpcode_target_4xx_message, var.message)}"

View File

@ -54,6 +54,7 @@ Creates DataDog monitors with the following checks:
| latency\_timeframe | Monitor timeframe for API latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | latency\_timeframe | Monitor timeframe for API latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitor name | string | `""` | no |
## Outputs ## Outputs

View File

@ -22,6 +22,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitor name"
default = ""
}
################################### ###################################
### LATENCY VARIABLES ### ### LATENCY VARIABLES ###
################################### ###################################

View File

@ -1,7 +1,7 @@
# Monitoring Api Gateway latency # Monitoring Api Gateway latency
resource "datadog_monitor" "API_Gateway_latency" { resource "datadog_monitor" "API_Gateway_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}" count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Gateway latency {{#is_alert}}{{{comparator}}} {{threshold}}ms ({{value}}ms){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}ms ({{value}}ms){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway latency {{#is_alert}}{{{comparator}}} {{threshold}}ms ({{value}}ms){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}ms ({{value}}ms){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.latency_message, var.message)}" message = "${coalesce(var.latency_message, var.message)}"
@ -33,7 +33,7 @@ resource "datadog_monitor" "API_Gateway_latency" {
# Monitoring API Gateway 5xx errors percent # Monitoring API Gateway 5xx errors percent
resource "datadog_monitor" "API_http_5xx_errors_count" { resource "datadog_monitor" "API_http_5xx_errors_count" {
count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}" count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Gateway HTTP 5xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway HTTP 5xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_5xx_requests_message, var.message)}" message = "${coalesce(var.http_5xx_requests_message, var.message)}"
@ -66,7 +66,7 @@ resource "datadog_monitor" "API_http_5xx_errors_count" {
# Monitoring API Gateway 4xx errors percent # Monitoring API Gateway 4xx errors percent
resource "datadog_monitor" "API_http_4xx_errors_count" { resource "datadog_monitor" "API_http_4xx_errors_count" {
count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}" count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Gateway HTTP 4xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway HTTP 4xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_4xx_requests_message, var.message)}" message = "${coalesce(var.http_4xx_requests_message, var.message)}"

View File

@ -69,6 +69,7 @@ Creates DataDog monitors with the following checks:
| no\_connection\_silenced | Groups to mute for Elasticache no connection monitor | map | `{}` | no | | no\_connection\_silenced | Groups to mute for Elasticache no connection monitor | map | `{}` | no |
| no\_connection\_time\_aggregator | Monitor aggregator for Elasticache no connection [available values: min, max or avg] | string | `"min"` | no | | no\_connection\_time\_aggregator | Monitor aggregator for Elasticache no connection [available values: min, max or avg] | string | `"min"` | no |
| no\_connection\_timeframe | Monitor timeframe for Elasticache no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | no\_connection\_timeframe | Monitor timeframe for Elasticache no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| swap\_enabled | Flag to enable Elasticache swap monitor | string | `"true"` | no | | swap\_enabled | Flag to enable Elasticache swap monitor | string | `"true"` | no |
| swap\_extra\_tags | Extra tags for Elasticache swap monitor | list | `[]` | no | | swap\_extra\_tags | Extra tags for Elasticache swap monitor | list | `[]` | no |
| swap\_message | Custom message for Elasticache swap monitor | string | `""` | no | | swap\_message | Custom message for Elasticache swap monitor | string | `""` | no |

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "elasticache_eviction" { resource "datadog_monitor" "elasticache_eviction" {
count = "${var.eviction_enabled == "true" ? 1 : 0}" count = "${var.eviction_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache eviction {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache eviction {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}"
message = "${coalesce(var.eviction_message, var.message)}" message = "${coalesce(var.eviction_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -33,7 +33,7 @@ resource "datadog_monitor" "elasticache_eviction" {
resource "datadog_monitor" "elasticache_max_connection" { resource "datadog_monitor" "elasticache_max_connection" {
count = "${var.max_connection_enabled == "true" ? 1 : 0}" count = "${var.max_connection_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache max connections reached {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache max connections reached {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}"
message = "${coalesce(var.max_connection_message, var.message)}" message = "${coalesce(var.max_connection_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -61,7 +61,7 @@ resource "datadog_monitor" "elasticache_max_connection" {
resource "datadog_monitor" "elasticache_no_connection" { resource "datadog_monitor" "elasticache_no_connection" {
count = "${var.no_connection_enabled == "true" ? 1 : 0}" count = "${var.no_connection_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache connections {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache connections {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}"
message = "${coalesce(var.no_connection_message, var.message)}" message = "${coalesce(var.no_connection_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -89,7 +89,7 @@ resource "datadog_monitor" "elasticache_no_connection" {
resource "datadog_monitor" "elasticache_swap" { resource "datadog_monitor" "elasticache_swap" {
count = "${var.swap_enabled == "true" ? 1 : 0}" count = "${var.swap_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache swap {{#is_alert}}{{{comparator}}} {{threshold}}MB ({{value}}MB){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}MB ({{value}}MB){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache swap {{#is_alert}}{{{comparator}}} {{threshold}}MB ({{value}}MB){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}MB ({{value}}MB){{/is_warning}}"
message = "${coalesce(var.swap_message, var.message)}" message = "${coalesce(var.swap_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -122,7 +122,7 @@ resource "datadog_monitor" "elasticache_swap" {
resource "datadog_monitor" "elasticache_free_memory" { resource "datadog_monitor" "elasticache_free_memory" {
count = "${var.free_memory_enabled == "true" ? 1 : 0}" count = "${var.free_memory_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache free memory {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache free memory {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.free_memory_message, var.message)}" message = "${coalesce(var.free_memory_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -155,7 +155,7 @@ resource "datadog_monitor" "elasticache_free_memory" {
resource "datadog_monitor" "elasticache_eviction_growing" { resource "datadog_monitor" "elasticache_eviction_growing" {
count = "${var.eviction_growing_enabled == "true" ? 1 : 0}" count = "${var.eviction_growing_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache evictions is growing {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache evictions is growing {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.eviction_growing_message, var.message)}" message = "${coalesce(var.eviction_growing_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -46,6 +46,7 @@ Creates DataDog monitors with the following checks:
| get\_hits\_timeframe | Monitor timeframe for Elasticache memcached get hits [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no | | get\_hits\_timeframe | Monitor timeframe for Elasticache memcached get hits [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "memcached_get_hits" { resource "datadog_monitor" "memcached_get_hits" {
count = "${var.get_hits_enabled == "true" ? 1 : 0}" count = "${var.get_hits_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache memcached cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache memcached cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.get_hits_message, var.message)}" message = "${coalesce(var.get_hits_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -35,7 +35,7 @@ resource "datadog_monitor" "memcached_get_hits" {
resource "datadog_monitor" "memcached_cpu_high" { resource "datadog_monitor" "memcached_cpu_high" {
count = "${var.cpu_high_enabled == "true" ? 1 : 0}" count = "${var.cpu_high_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache memcached CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache memcached CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_high_message, var.message)}" message = "${coalesce(var.cpu_high_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -53,6 +53,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| replication\_lag\_enabled | Flag to enable Elasticache redis replication lag monitor | string | `"true"` | no | | replication\_lag\_enabled | Flag to enable Elasticache redis replication lag monitor | string | `"true"` | no |
| replication\_lag\_extra\_tags | Extra tags for Elasticache redis replication lag monitor | list | `[]` | no | | replication\_lag\_extra\_tags | Extra tags for Elasticache redis replication lag monitor | list | `[]` | no |
| replication\_lag\_message | Custom message for Elasticache redis replication lag monitor | string | `""` | no | | replication\_lag\_message | Custom message for Elasticache redis replication lag monitor | string | `""` | no |

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "redis_cache_hits" { resource "datadog_monitor" "redis_cache_hits" {
count = "${var.cache_hits_enabled == "true" ? 1 : 0}" count = "${var.cache_hits_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache redis cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cache_hits_message, var.message)}" message = "${coalesce(var.cache_hits_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -35,7 +35,7 @@ resource "datadog_monitor" "redis_cache_hits" {
resource "datadog_monitor" "redis_cpu_high" { resource "datadog_monitor" "redis_cpu_high" {
count = "${var.cpu_high_enabled == "true" ? 1 : 0}" count = "${var.cpu_high_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache redis CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_high_message, var.message)}" message = "${coalesce(var.cpu_high_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -63,7 +63,7 @@ resource "datadog_monitor" "redis_cpu_high" {
resource "datadog_monitor" "redis_replication_lag" { resource "datadog_monitor" "redis_replication_lag" {
count = "${var.replication_lag_enabled == "true" ? 1 : 0}" count = "${var.replication_lag_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache redis replication lag {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis replication lag {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
message = "${coalesce(var.replication_lag_message, var.message)}" message = "${coalesce(var.replication_lag_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -96,7 +96,7 @@ resource "datadog_monitor" "redis_replication_lag" {
resource "datadog_monitor" "redis_commands" { resource "datadog_monitor" "redis_commands" {
count = "${var.commands_enabled == "true" ? 1 : 0}" count = "${var.commands_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Elasticache redis is receiving no commands" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis is receiving no commands"
message = "${coalesce(var.commands_message, var.message)}" message = "${coalesce(var.commands_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -55,6 +55,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -5,7 +5,7 @@
Workaround : in the query, we add "0.1" to the result and we use the comparator ">=". No alert was triggered without that. */ Workaround : in the query, we add "0.1" to the result and we use the comparator ">=". No alert was triggered without that. */
resource "datadog_monitor" "es_cluster_status" { resource "datadog_monitor" "es_cluster_status" {
count = "${var.es_cluster_status_enabled == "true" ? 1 : 0}" count = "${var.es_cluster_status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ElasticSearch cluster status is not green" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster status is not green"
message = "${coalesce(var.es_cluster_status_message, var.message)}" message = "${coalesce(var.es_cluster_status_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -40,7 +40,7 @@ resource "datadog_monitor" "es_cluster_status" {
### Elasticsearch cluster free storage space monitor ### ### Elasticsearch cluster free storage space monitor ###
resource "datadog_monitor" "es_free_space_low" { resource "datadog_monitor" "es_free_space_low" {
count = "${var.diskspace_enabled == "true" ? 1 : 0}" count = "${var.diskspace_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ElasticSearch cluster free storage space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster free storage space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.diskspace_message, var.message)}" message = "${coalesce(var.diskspace_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -75,7 +75,7 @@ resource "datadog_monitor" "es_free_space_low" {
### Elasticsearch cluster CPU monitor ### ### Elasticsearch cluster CPU monitor ###
resource "datadog_monitor" "es_cpu_90_15min" { resource "datadog_monitor" "es_cpu_90_15min" {
count = "${var.cpu_enabled == "true" ? 1 : 0}" count = "${var.cpu_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ElasticSearch cluster CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_message, var.message)}" message = "${coalesce(var.cpu_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -77,6 +77,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "ELB_no_healthy_instances" { resource "datadog_monitor" "ELB_no_healthy_instances" {
count = "${var.elb_no_healthy_instance_enabled == "true" ? 1 : 0}" count = "${var.elb_no_healthy_instance_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB healthy instances {{#is_alert}}is at 0{{/is_alert}}{{#is_warning}}is at {{value}}%{{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB healthy instances {{#is_alert}}is at 0{{/is_alert}}{{#is_warning}}is at {{value}}%{{/is_warning}}"
message = "${coalesce(var.elb_no_healthy_instance_message, var.message)}" message = "${coalesce(var.elb_no_healthy_instance_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -35,7 +35,7 @@ resource "datadog_monitor" "ELB_no_healthy_instances" {
resource "datadog_monitor" "ELB_too_much_4xx" { resource "datadog_monitor" "ELB_too_much_4xx" {
count = "${var.elb_4xx_enabled == "true" ? 1 : 0}" count = "${var.elb_4xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.elb_4xx_message, var.message)}" message = "${coalesce(var.elb_4xx_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -69,7 +69,7 @@ resource "datadog_monitor" "ELB_too_much_4xx" {
resource "datadog_monitor" "ELB_too_much_5xx" { resource "datadog_monitor" "ELB_too_much_5xx" {
count = "${var.elb_5xx_enabled == "true" ? 1 : 0}" count = "${var.elb_5xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.elb_5xx_message, var.message)}" message = "${coalesce(var.elb_5xx_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -103,7 +103,7 @@ resource "datadog_monitor" "ELB_too_much_5xx" {
resource "datadog_monitor" "ELB_too_much_4xx_backend" { resource "datadog_monitor" "ELB_too_much_4xx_backend" {
count = "${var.elb_backend_4xx_enabled == "true" ? 1 : 0}" count = "${var.elb_backend_4xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB backend 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB backend 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.elb_backend_4xx_message, var.message)}" message = "${coalesce(var.elb_backend_4xx_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -137,7 +137,7 @@ resource "datadog_monitor" "ELB_too_much_4xx_backend" {
resource "datadog_monitor" "ELB_too_much_5xx_backend" { resource "datadog_monitor" "ELB_too_much_5xx_backend" {
count = "${var.elb_backend_5xx_enabled == "true" ? 1 : 0}" count = "${var.elb_backend_5xx_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB backend 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB backend 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.elb_backend_5xx_message, var.message)}" message = "${coalesce(var.elb_backend_5xx_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -171,7 +171,7 @@ resource "datadog_monitor" "ELB_too_much_5xx_backend" {
resource "datadog_monitor" "ELB_backend_latency" { resource "datadog_monitor" "ELB_backend_latency" {
count = "${var.elb_backend_latency_enabled == "true" ? 1 : 0}" count = "${var.elb_backend_latency_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] ELB latency too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ELB latency too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
message = "${coalesce(var.elb_backend_latency_message, var.message)}" message = "${coalesce(var.elb_backend_latency_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -34,6 +34,7 @@ Creates DataDog monitors with the following checks:
| incoming\_records\_timeframe | Monitor timeframe for incoming records metrics evaluation [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no | | incoming\_records\_timeframe | Monitor timeframe for incoming records metrics evaluation [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,7 +1,7 @@
### Kinesis Firehose Incoming records ### ### Kinesis Firehose Incoming records ###
resource "datadog_monitor" "firehose_incoming_records" { resource "datadog_monitor" "firehose_incoming_records" {
count = "${var.incoming_records_enabled == "true" ? 1 : 0}" count = "${var.incoming_records_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Kinesis Firehose No incoming records" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kinesis Firehose No incoming records"
message = "${coalesce(var.incoming_records_message, var.message)}" message = "${coalesce(var.incoming_records_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -36,6 +36,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,7 +1,7 @@
### RDS Aurora Mysql Replica Lag monitor ### ### RDS Aurora Mysql Replica Lag monitor ###
resource "datadog_monitor" "rds_aurora_mysql_replica_lag" { resource "datadog_monitor" "rds_aurora_mysql_replica_lag" {
count = "${var.aurora_replicalag_enabled == "true" ? 1 : 0}" count = "${var.aurora_replicalag_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] RDS Aurora Mysql replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS Aurora Mysql replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.aurora_replicalag_message, var.message)}" message = "${coalesce(var.aurora_replicalag_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -36,6 +36,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,7 +1,7 @@
### RDS Aurora Postgresql Replica Lag monitor ### ### RDS Aurora Postgresql Replica Lag monitor ###
resource "datadog_monitor" "rds_aurora_postgresql_replica_lag" { resource "datadog_monitor" "rds_aurora_postgresql_replica_lag" {
count = "${var.aurora_replicalag_enabled == "true" ? 1 : 0}" count = "${var.aurora_replicalag_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] RDS Aurora PostgreSQL replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS Aurora PostgreSQL replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.aurora_replicalag_message, var.message)}" message = "${coalesce(var.aurora_replicalag_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -47,6 +47,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| replicalag\_enabled | Flag to enable RDS replica lag monitor | string | `"true"` | no | | replicalag\_enabled | Flag to enable RDS replica lag monitor | string | `"true"` | no |
| replicalag\_extra\_tags | Extra tags for RDS replica lag monitor | list | `[]` | no | | replicalag\_extra\_tags | Extra tags for RDS replica lag monitor | list | `[]` | no |
| replicalag\_message | Custom message for RDS replica lag monitor | string | `""` | no | | replicalag\_message | Custom message for RDS replica lag monitor | string | `""` | no |

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,7 +1,7 @@
### RDS instance CPU monitor ### ### RDS instance CPU monitor ###
resource "datadog_monitor" "rds_cpu_90_15min" { resource "datadog_monitor" "rds_cpu_90_15min" {
count = "${var.cpu_enabled == "true" ? 1 : 0}" count = "${var.cpu_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] RDS instance CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS instance CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_message, var.message)}" message = "${coalesce(var.cpu_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -34,7 +34,7 @@ resource "datadog_monitor" "rds_cpu_90_15min" {
### RDS instance free space monitor ### ### RDS instance free space monitor ###
resource "datadog_monitor" "rds_free_space_low" { resource "datadog_monitor" "rds_free_space_low" {
count = "${var.diskspace_enabled == "true" ? 1 : 0}" count = "${var.diskspace_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] RDS instance free space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS instance free space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.diskspace_message, var.message)}" message = "${coalesce(var.diskspace_message, var.message)}"
type = "metric alert" type = "metric alert"
@ -68,7 +68,7 @@ resource "datadog_monitor" "rds_free_space_low" {
### RDS Replica Lag monitor ### ### RDS Replica Lag monitor ###
resource "datadog_monitor" "rds_replica_lag" { resource "datadog_monitor" "rds_replica_lag" {
count = "${var.replicalag_enabled == "true" ? 1 : 0}" count = "${var.replicalag_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] RDS replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.replicalag_message, var.message)}" message = "${coalesce(var.replicalag_message, var.message)}"
type = "metric alert" type = "metric alert"

View File

@ -27,6 +27,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags | Tags used for metrics filtering | string | `"*"` | no | | filter\_tags | Tags used for metrics filtering | string | `"*"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| vpn\_status\_enabled | Flag to enable VPN status monitor | string | `"true"` | no | | vpn\_status\_enabled | Flag to enable VPN status monitor | string | `"true"` | no |
| vpn\_status\_extra\_tags | Extra tags for VPN status monitor | list | `[]` | no | | vpn\_status\_extra\_tags | Extra tags for VPN status monitor | list | `[]` | no |
| vpn\_status\_message | Custom message for VPN status monitor | string | `""` | no | | vpn\_status\_message | Custom message for VPN status monitor | string | `""` | no |

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "VPN_status" { resource "datadog_monitor" "VPN_status" {
count = "${var.vpn_status_enabled == "true" ? 1 : 0}" count = "${var.vpn_status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] VPN tunnel down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] VPN tunnel down"
message = "${coalesce(var.vpn_status_message, var.message)}" message = "${coalesce(var.vpn_status_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -49,6 +49,7 @@ Creates DataDog monitors with the following checks:
| other\_requests\_threshold\_warning | Warning regarding acceptable percent of other requests | string | `"50"` | no | | other\_requests\_threshold\_warning | Warning regarding acceptable percent of other requests | string | `"50"` | no |
| other\_requests\_time\_aggregator | Monitor aggregator for API Management other requests [available values: min, max or avg] | string | `"min"` | no | | other\_requests\_time\_aggregator | Monitor aggregator for API Management other requests [available values: min, max or avg] | string | `"min"` | no |
| other\_requests\_timeframe | Monitor timeframe for API Management other requests [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | other\_requests\_timeframe | Monitor timeframe for API Management other requests [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable API Management status monitor | string | `"true"` | no | | status\_enabled | Flag to enable API Management status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for API Management status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for API Management status monitor | list | `[]` | no |
| status\_message | Custom message for API Management status monitor | string | `""` | no | | status\_message | Custom message for API Management status monitor | string | `""` | no |

View File

@ -19,6 +19,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "filter_tags_use_defaults" { variable "filter_tags_use_defaults" {
description = "Use default filter tags convention" description = "Use default filter tags convention"
default = "true" default = "true"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "apimgt_status" { resource "datadog_monitor" "apimgt_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Management is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Management is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -30,7 +30,7 @@ resource "datadog_monitor" "apimgt_status" {
resource "datadog_monitor" "apimgt_failed_requests" { resource "datadog_monitor" "apimgt_failed_requests" {
count = "${var.failed_requests_enabled == "true" ? 1 : 0}" count = "${var.failed_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Management too many failed requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Management too many failed requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_requests_message, var.message)}" message = "${coalesce(var.failed_requests_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -63,7 +63,7 @@ resource "datadog_monitor" "apimgt_failed_requests" {
resource "datadog_monitor" "apimgt_other_requests" { resource "datadog_monitor" "apimgt_other_requests" {
count = "${var.other_requests_enabled == "true" ? 1 : 0}" count = "${var.other_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Management too many other requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Management too many other requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.other_requests_message, var.message)}" message = "${coalesce(var.other_requests_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -96,7 +96,7 @@ resource "datadog_monitor" "apimgt_other_requests" {
resource "datadog_monitor" "apimgt_unauthorized_requests" { resource "datadog_monitor" "apimgt_unauthorized_requests" {
count = "${var.unauthorized_requests_enabled == "true" ? 1 : 0}" count = "${var.unauthorized_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Management too many unauthorized requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Management too many unauthorized requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.unauthorized_requests_message, var.message)}" message = "${coalesce(var.unauthorized_requests_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -129,7 +129,7 @@ resource "datadog_monitor" "apimgt_unauthorized_requests" {
resource "datadog_monitor" "apimgt_successful_requests" { resource "datadog_monitor" "apimgt_successful_requests" {
count = "${var.successful_requests_enabled == "true" ? 1 : 0}" count = "${var.successful_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] API Management successful requests rate too low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Management successful requests rate too low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.successful_requests_message, var.message)}" message = "${coalesce(var.successful_requests_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -65,6 +65,7 @@ Creates DataDog monitors with the following checks:
| memory\_usage\_timeframe | Monitor timeframe for App Services memory usage [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | memory\_usage\_timeframe | Monitor timeframe for App Services memory usage [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| response\_time\_enabled | Flag to enable App Services response time monitor | string | `"true"` | no | | response\_time\_enabled | Flag to enable App Services response time monitor | string | `"true"` | no |
| response\_time\_extra\_tags | Extra tags for App Services response time monitor | list | `[]` | no | | response\_time\_extra\_tags | Extra tags for App Services response time monitor | list | `[]` | no |
| response\_time\_message | Custom message for App Services response time monitor | string | `""` | no | | response\_time\_message | Custom message for App Services response time monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure App Services specific variables # Azure App Services specific variables
variable "response_time_silenced" { variable "response_time_silenced" {
description = "Groups to mute for App Services response time monitor" description = "Groups to mute for App Services response time monitor"

View File

@ -1,7 +1,7 @@
# Monitoring App Services response time # Monitoring App Services response time
resource "datadog_monitor" "appservices_response_time" { resource "datadog_monitor" "appservices_response_time" {
count = "${var.response_time_enabled == "true" ? 1 : 0}" count = "${var.response_time_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] App Services response time too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services response time too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.response_time_message, var.message)}" message = "${coalesce(var.response_time_message, var.message)}"
@ -33,7 +33,7 @@ resource "datadog_monitor" "appservices_response_time" {
# Monitoring App Services memory usage # Monitoring App Services memory usage
resource "datadog_monitor" "appservices_memory_usage_count" { resource "datadog_monitor" "appservices_memory_usage_count" {
count = "${var.memory_usage_enabled == "true" ? 1 : 0}" count = "${var.memory_usage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] App Services memory usage {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services memory usage {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.memory_usage_message, var.message)}" message = "${coalesce(var.memory_usage_message, var.message)}"
@ -65,7 +65,7 @@ resource "datadog_monitor" "appservices_memory_usage_count" {
# Monitoring App Services 5xx errors percent # Monitoring App Services 5xx errors percent
resource "datadog_monitor" "appservices_http_5xx_errors_count" { resource "datadog_monitor" "appservices_http_5xx_errors_count" {
count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}" count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] App Services HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_5xx_requests_message, var.message)}" message = "${coalesce(var.http_5xx_requests_message, var.message)}"
@ -98,7 +98,7 @@ resource "datadog_monitor" "appservices_http_5xx_errors_count" {
# Monitoring App Services 4xx errors percent # Monitoring App Services 4xx errors percent
resource "datadog_monitor" "appservices_http_4xx_errors_count" { resource "datadog_monitor" "appservices_http_4xx_errors_count" {
count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}" count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] App Services HTTP 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_4xx_requests_message, var.message)}" message = "${coalesce(var.http_4xx_requests_message, var.message)}"
@ -131,7 +131,7 @@ resource "datadog_monitor" "appservices_http_4xx_errors_count" {
# Monitoring App Services HTTP 2xx & 3xx status pages percent # Monitoring App Services HTTP 2xx & 3xx status pages percent
resource "datadog_monitor" "appservices_http_success_status_rate" { resource "datadog_monitor" "appservices_http_success_status_rate" {
count = "${var.http_successful_requests_enabled == "true" ? 1 : 0}" count = "${var.http_successful_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] App Services HTTP successful responses too low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP successful responses too low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_successful_requests_message, var.message)}" message = "${coalesce(var.http_successful_requests_message, var.message)}"

View File

@ -38,6 +38,7 @@ Creates DataDog monitors with the following checks:
| latency\_timeframe | Monitor timeframe for Azure Search latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | latency\_timeframe | Monitor timeframe for Azure Search latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| throttled\_queries\_rate\_enabled | Flag to enable Azure Search throttled queries rate monitor | string | `"true"` | no | | throttled\_queries\_rate\_enabled | Flag to enable Azure Search throttled queries rate monitor | string | `"true"` | no |
| throttled\_queries\_rate\_extra\_tags | Extra tags for Azure Search throttled queries rate monitor | list | `[]` | no | | throttled\_queries\_rate\_extra\_tags | Extra tags for Azure Search throttled queries rate monitor | list | `[]` | no |
| throttled\_queries\_rate\_message | Custom message for Azure Search throttled queries rate monitor | string | `""` | no | | throttled\_queries\_rate\_message | Custom message for Azure Search throttled queries rate monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure Search specific variables # Azure Search specific variables
variable "latency_silenced" { variable "latency_silenced" {
description = "Groups to mute for Azure Search latency monitor" description = "Groups to mute for Azure Search latency monitor"

View File

@ -1,7 +1,7 @@
# Monitoring Azure Search latency # Monitoring Azure Search latency
resource "datadog_monitor" "azure_search_latency" { resource "datadog_monitor" "azure_search_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}" count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Azure Search latency too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Azure Search latency too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.latency_message, var.message)}" message = "${coalesce(var.latency_message, var.message)}"
@ -33,7 +33,7 @@ resource "datadog_monitor" "azure_search_latency" {
# Monitoring Azure Search throttled queries # Monitoring Azure Search throttled queries
resource "datadog_monitor" "azure_search_throttled_queries_rate" { resource "datadog_monitor" "azure_search_throttled_queries_rate" {
count = "${var.throttled_queries_rate_enabled == "true" ? 1 : 0}" count = "${var.throttled_queries_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Azure Search throttled queries rate is too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Azure Search throttled queries rate is too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.throttled_queries_rate_message, var.message)}" message = "${coalesce(var.throttled_queries_rate_message, var.message)}"

View File

@ -56,6 +56,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable Cosmos DB status monitor | string | `"true"` | no | | status\_enabled | Flag to enable Cosmos DB status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Cosmos DB status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for Cosmos DB status monitor | list | `[]` | no |
| status\_message | Custom message for Cosmos DB status monitor | string | `""` | no | | status\_message | Custom message for Cosmos DB status monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure CosmosDB specific variables # Azure CosmosDB specific variables
variable "status_enabled" { variable "status_enabled" {
description = "Flag to enable Cosmos DB status monitor" description = "Flag to enable Cosmos DB status monitor"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "cosmos_db_status" { resource "datadog_monitor" "cosmos_db_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Cosmos DB is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Cosmos DB is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -34,7 +34,7 @@ resource "datadog_monitor" "cosmos_db_status" {
resource "datadog_monitor" "cosmos_db_4xx_requests" { resource "datadog_monitor" "cosmos_db_4xx_requests" {
count = "${var.cosmos_db_4xx_requests_enabled == "true" ? 1 : 0}" count = "${var.cosmos_db_4xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Cosmos DB 4xx requests rate is high {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Cosmos DB 4xx requests rate is high {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cosmos_db_4xx_requests_message, var.message)}" message = "${coalesce(var.cosmos_db_4xx_requests_message, var.message)}"
# List of available status codes : https://docs.microsoft.com/en-us/rest/api/cosmos-db/http-status-codes-for-cosmosdb # List of available status codes : https://docs.microsoft.com/en-us/rest/api/cosmos-db/http-status-codes-for-cosmosdb
@ -79,7 +79,7 @@ resource "datadog_monitor" "cosmos_db_4xx_requests" {
resource "datadog_monitor" "cosmos_db_5xx_requests" { resource "datadog_monitor" "cosmos_db_5xx_requests" {
count = "${var.cosmos_db_5xx_requests_enabled == "true" ? 1 : 0}" count = "${var.cosmos_db_5xx_requests_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Cosmos DB 5xx requests rate is high {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Cosmos DB 5xx requests rate is high {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cosmos_db_5xx_requests_message, var.message)}" message = "${coalesce(var.cosmos_db_5xx_requests_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -115,7 +115,7 @@ resource "datadog_monitor" "cosmos_db_5xx_requests" {
resource "datadog_monitor" "cosmos_db_scaling" { resource "datadog_monitor" "cosmos_db_scaling" {
count = "${var.cosmos_db_scaling_enabled == "true" ? 1 : 0}" count = "${var.cosmos_db_scaling_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Cosmos DB max scaling reached for collection {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Cosmos DB max scaling reached for collection {{#is_alert}}{{comparator}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{comparator}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cosmos_db_scaling_message, var.message)}" message = "${coalesce(var.cosmos_db_scaling_message, var.message)}"
# List of available status codes : https://docs.microsoft.com/en-us/rest/api/cosmos-db/http-status-codes-for-cosmosdb # List of available status codes : https://docs.microsoft.com/en-us/rest/api/cosmos-db/http-status-codes-for-cosmosdb

View File

@ -29,6 +29,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable Datalake Store status monitor | string | `"true"` | no | | status\_enabled | Flag to enable Datalake Store status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Datalake Store status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | list | `[]` | no | | status\_extra\_tags | Extra tags for Datalake Store status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | list | `[]` | no |
| status\_message | Custom message for Datalake Store status monitor | string | `""` | no | | status\_message | Custom message for Datalake Store status monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure Datalake Store specific variables # Azure Datalake Store specific variables
variable "status_enabled" { variable "status_enabled" {
description = "Flag to enable Datalake Store status monitor" description = "Flag to enable Datalake Store status monitor"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "datalakestore_status" { resource "datadog_monitor" "datalakestore_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Datalake Store is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Datalake Store is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -45,6 +45,7 @@ Creates DataDog monitors with the following checks:
| no\_successful\_message\_rate\_silenced | Groups to mute for²id no successful message monitor | map | `{}` | no | | no\_successful\_message\_rate\_silenced | Groups to mute for²id no successful message monitor | map | `{}` | no |
| no\_successful\_message\_rate\_time\_aggregator | Monitor aggregator for Event Grid no successful message [available values: min, max or avg] | string | `"min"` | no | | no\_successful\_message\_rate\_time\_aggregator | Monitor aggregator for Event Grid no successful message [available values: min, max or avg] | string | `"min"` | no |
| no\_successful\_message\_rate\_timeframe | Monitor timeframe for Event Grid no successful message [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | no\_successful\_message\_rate\_timeframe | Monitor timeframe for Event Grid no successful message [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| unmatched\_events\_rate\_enabled | Flag to enable Event Grid unmatched events monitor | string | `"true"` | no | | unmatched\_events\_rate\_enabled | Flag to enable Event Grid unmatched events monitor | string | `"true"` | no |
| unmatched\_events\_rate\_extra\_tags | Extra tags for Event Grid unmatched events monitor | list | `[]` | no | | unmatched\_events\_rate\_extra\_tags | Extra tags for Event Grid unmatched events monitor | list | `[]` | no |
| unmatched\_events\_rate\_message | Custom message for Event Grid unmatched events monitor | string | `""` | no | | unmatched\_events\_rate\_message | Custom message for Event Grid unmatched events monitor | string | `""` | no |

View File

@ -19,6 +19,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "filter_tags_use_defaults" { variable "filter_tags_use_defaults" {
description = "Use default filter tags convention" description = "Use default filter tags convention"
default = "true" default = "true"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "eventgrid_no_successful_message" { resource "datadog_monitor" "eventgrid_no_successful_message" {
count = "${var.no_successful_message_rate_enabled == "true" ? 1 : 0}" count = "${var.no_successful_message_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Grid no successful message {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Grid no successful message {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.no_successful_message_rate_message, var.message)}" message = "${coalesce(var.no_successful_message_rate_message, var.message)}"
# Query is a bit weird, but we only want to check the no-data # Query is a bit weird, but we only want to check the no-data
@ -28,7 +28,7 @@ resource "datadog_monitor" "eventgrid_no_successful_message" {
resource "datadog_monitor" "eventgrid_failed_messages" { resource "datadog_monitor" "eventgrid_failed_messages" {
count = "${var.failed_messages_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_messages_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Grid too many failed messages {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Grid too many failed messages {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_messages_rate_message, var.message)}" message = "${coalesce(var.failed_messages_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -65,7 +65,7 @@ resource "datadog_monitor" "eventgrid_failed_messages" {
resource "datadog_monitor" "eventgrid_unmatched_events" { resource "datadog_monitor" "eventgrid_unmatched_events" {
count = "${var.unmatched_events_rate_enabled == "true" ? 1 : 0}" count = "${var.unmatched_events_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Grid too many unmatched events {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Grid too many unmatched events {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.unmatched_events_rate_message, var.message)}" message = "${coalesce(var.unmatched_events_rate_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -47,6 +47,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable Event Hub status monitor | string | `"true"` | no | | status\_enabled | Flag to enable Event Hub status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Event Hub status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for Event Hub status monitor | list | `[]` | no |
| status\_message | Custom message for Event Hub status monitor | string | `""` | no | | status\_message | Custom message for Event Hub status monitor | string | `""` | no |

View File

@ -19,6 +19,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "filter_tags_use_defaults" { variable "filter_tags_use_defaults" {
description = "Use default filter tags convention" description = "Use default filter tags convention"
default = "true" default = "true"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "eventhub_status" { resource "datadog_monitor" "eventhub_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Hub is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Hub is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -28,7 +28,7 @@ resource "datadog_monitor" "eventhub_status" {
resource "datadog_monitor" "eventhub_failed_requests" { resource "datadog_monitor" "eventhub_failed_requests" {
count = "${var.failed_requests_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_requests_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Hub too many failed requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Hub too many failed requests {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_requests_rate_message, var.message)}" message = "${coalesce(var.failed_requests_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -62,7 +62,7 @@ resource "datadog_monitor" "eventhub_failed_requests" {
resource "datadog_monitor" "eventhub_errors" { resource "datadog_monitor" "eventhub_errors" {
count = "${var.errors_rate_enabled == "true" ? 1 : 0}" count = "${var.errors_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Event Hub too many errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Event Hub too many errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.errors_rate_message, var.message)}" message = "${coalesce(var.errors_rate_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -57,6 +57,7 @@ Creates DataDog monitors with the following checks:
| http\_5xx\_errors\_rate\_timeframe | Monitor timeframe for Functions Http 5xx errors rate [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | http\_5xx\_errors\_rate\_timeframe | Monitor timeframe for Functions Http 5xx errors rate [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure Function App specific variables # Azure Function App specific variables
variable "http_5xx_errors_rate_silenced" { variable "http_5xx_errors_rate_silenced" {
description = "Groups to mute for Functions Http 5xx errors rate monitor" description = "Groups to mute for Functions Http 5xx errors rate monitor"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "function_http_5xx_errors_rate" { resource "datadog_monitor" "function_http_5xx_errors_rate" {
count = "${var.http_5xx_errors_rate_enabled == "true" ? 1 : 0}" count = "${var.http_5xx_errors_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Function App HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.http_5xx_errors_rate_message, var.message)}" message = "${coalesce(var.http_5xx_errors_rate_message, var.message)}"
@ -32,7 +32,7 @@ resource "datadog_monitor" "function_http_5xx_errors_rate" {
resource "datadog_monitor" "function_high_connections_count" { resource "datadog_monitor" "function_high_connections_count" {
count = "${var.high_connections_count_enabled == "true" ? 1 : 0}" count = "${var.high_connections_count_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Function App connections count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App connections count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.high_connections_count_message, var.message)}" message = "${coalesce(var.high_connections_count_message, var.message)}"
@ -63,7 +63,7 @@ resource "datadog_monitor" "function_high_connections_count" {
resource "datadog_monitor" "function_high_threads_count" { resource "datadog_monitor" "function_high_threads_count" {
count = "${var.high_threads_count_enabled == "true" ? 1 : 0}" count = "${var.high_threads_count_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Function App threads count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App threads count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert" type = "metric alert"
message = "${coalesce(var.high_threads_count_message, var.message)}" message = "${coalesce(var.high_threads_count_message, var.message)}"

View File

@ -130,6 +130,7 @@ Creates DataDog monitors with the following checks:
| orphaned\_d2c\_telemetry\_egress\_silenced | Groups to mute for IoT Hub orphaned d2c telemetry monitor | map | `{}` | no | | orphaned\_d2c\_telemetry\_egress\_silenced | Groups to mute for IoT Hub orphaned d2c telemetry monitor | map | `{}` | no |
| orphaned\_d2c\_telemetry\_egress\_time\_aggregator | Monitor aggregator for IoT Hub orphaned d2c telemetry [available values: min, max, sum or avg] | string | `"min"` | no | | orphaned\_d2c\_telemetry\_egress\_time\_aggregator | Monitor aggregator for IoT Hub orphaned d2c telemetry [available values: min, max, sum or avg] | string | `"min"` | no |
| orphaned\_d2c\_telemetry\_egress\_timeframe | Monitor timeframe for IoT Hub orphaned d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | orphaned\_d2c\_telemetry\_egress\_timeframe | Monitor timeframe for IoT Hub orphaned d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable IoT Hub status monitor | string | `"true"` | no | | status\_enabled | Flag to enable IoT Hub status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for IoT Hub status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for IoT Hub status monitor | list | `[]` | no |
| status\_message | Custom message for IoT Hub status monitor | string | `""` | no | | status\_message | Custom message for IoT Hub status monitor | string | `""` | no |

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "too_many_jobs_failed" { resource "datadog_monitor" "too_many_jobs_failed" {
count = "${var.failed_jobs_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_jobs_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many jobs failed {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many jobs failed {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_jobs_rate_message, var.message)}" message = "${coalesce(var.failed_jobs_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -36,7 +36,7 @@ resource "datadog_monitor" "too_many_jobs_failed" {
resource "datadog_monitor" "too_many_list_jobs_failed" { resource "datadog_monitor" "too_many_list_jobs_failed" {
count = "${var.failed_listjobs_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_listjobs_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many list_jobs failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many list_jobs failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_listjobs_rate_message, var.message)}" message = "${coalesce(var.failed_listjobs_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -72,7 +72,7 @@ resource "datadog_monitor" "too_many_list_jobs_failed" {
resource "datadog_monitor" "too_many_query_jobs_failed" { resource "datadog_monitor" "too_many_query_jobs_failed" {
count = "${var.failed_queryjobs_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_queryjobs_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many query_jobs failed {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many query_jobs failed {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_queryjobs_rate_message, var.message)}" message = "${coalesce(var.failed_queryjobs_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -108,7 +108,7 @@ resource "datadog_monitor" "too_many_query_jobs_failed" {
resource "datadog_monitor" "status" { resource "datadog_monitor" "status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -136,7 +136,7 @@ resource "datadog_monitor" "status" {
resource "datadog_monitor" "total_devices" { resource "datadog_monitor" "total_devices" {
count = "${var.total_devices_enabled == "true" ? 1 : 0}" count = "${var.total_devices_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Total devices is wrong {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Total devices is wrong {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.total_devices_message, var.message)}" message = "${coalesce(var.total_devices_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -164,7 +164,7 @@ resource "datadog_monitor" "total_devices" {
resource "datadog_monitor" "too_many_c2d_methods_failed" { resource "datadog_monitor" "too_many_c2d_methods_failed" {
count = "${var.failed_c2d_methods_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_c2d_methods_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many c2d methods failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many c2d methods failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_c2d_methods_rate_message, var.message)}" message = "${coalesce(var.failed_c2d_methods_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -200,7 +200,7 @@ resource "datadog_monitor" "too_many_c2d_methods_failed" {
resource "datadog_monitor" "too_many_c2d_twin_read_failed" { resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
count = "${var.failed_c2d_twin_read_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_c2d_twin_read_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many c2d twin read failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many c2d twin read failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_c2d_twin_read_rate_message, var.message)}" message = "${coalesce(var.failed_c2d_twin_read_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -236,7 +236,7 @@ resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
resource "datadog_monitor" "too_many_c2d_twin_update_failed" { resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
count = "${var.failed_c2d_twin_update_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_c2d_twin_update_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many c2d twin update failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many c2d twin update failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_c2d_twin_update_rate_message, var.message)}" message = "${coalesce(var.failed_c2d_twin_update_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -272,7 +272,7 @@ resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
resource "datadog_monitor" "too_many_d2c_twin_read_failed" { resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
count = "${var.failed_d2c_twin_read_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_d2c_twin_read_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c twin read failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c twin read failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_d2c_twin_read_rate_message, var.message)}" message = "${coalesce(var.failed_d2c_twin_read_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -308,7 +308,7 @@ resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
resource "datadog_monitor" "too_many_d2c_twin_update_failed" { resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
count = "${var.failed_d2c_twin_update_rate_enabled == "true" ? 1 : 0}" count = "${var.failed_d2c_twin_update_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c twin update failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c twin update failure {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.failed_d2c_twin_update_rate_message, var.message)}" message = "${coalesce(var.failed_d2c_twin_update_rate_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -344,7 +344,7 @@ resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
resource "datadog_monitor" "too_many_d2c_telemetry_egress_dropped" { resource "datadog_monitor" "too_many_d2c_telemetry_egress_dropped" {
count = "${var.dropped_d2c_telemetry_egress_enabled == "true" ? 1 : 0}" count = "${var.dropped_d2c_telemetry_egress_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c telemetry egress dropped {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c telemetry egress dropped {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.dropped_d2c_telemetry_egress_message, var.message)}" message = "${coalesce(var.dropped_d2c_telemetry_egress_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -382,7 +382,7 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_dropped" {
resource "datadog_monitor" "too_many_d2c_telemetry_egress_orphaned" { resource "datadog_monitor" "too_many_d2c_telemetry_egress_orphaned" {
count = "${var.orphaned_d2c_telemetry_egress_enabled == "true" ? 1 : 0}" count = "${var.orphaned_d2c_telemetry_egress_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c telemetry egress orphaned {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c telemetry egress orphaned {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.orphaned_d2c_telemetry_egress_message, var.message)}" message = "${coalesce(var.orphaned_d2c_telemetry_egress_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -420,7 +420,7 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_orphaned" {
resource "datadog_monitor" "too_many_d2c_telemetry_egress_invalid" { resource "datadog_monitor" "too_many_d2c_telemetry_egress_invalid" {
count = "${var.invalid_d2c_telemetry_egress_enabled == "true" ? 1 : 0}" count = "${var.invalid_d2c_telemetry_egress_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c telemetry egress invalid {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c telemetry egress invalid {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.invalid_d2c_telemetry_egress_message, var.message)}" message = "${coalesce(var.invalid_d2c_telemetry_egress_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -458,7 +458,7 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_invalid" {
resource "datadog_monitor" "too_many_d2c_telemetry_ingress_nosent" { resource "datadog_monitor" "too_many_d2c_telemetry_ingress_nosent" {
count = "${var.too_many_d2c_telemetry_ingress_nosent_enabled == "true" ? 1 : 0}" count = "${var.too_many_d2c_telemetry_ingress_nosent_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] IOT Hub Too many d2c telemetry ingress not sent {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] IOT Hub Too many d2c telemetry ingress not sent {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.too_many_d2c_telemetry_ingress_nosent_message, var.message)}" message = "${coalesce(var.too_many_d2c_telemetry_ingress_nosent_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -47,6 +47,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable Key Vault status monitor | string | `"true"` | no | | status\_enabled | Flag to enable Key Vault status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Key Vault status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for Key Vault status monitor | list | `[]` | no |
| status\_message | Custom message for Key Vault status monitor | string | `""` | no | | status\_message | Custom message for Key Vault status monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure Key Vault specific variables # Azure Key Vault specific variables
variable "status_enabled" { variable "status_enabled" {
description = "Flag to enable Key Vault status monitor" description = "Flag to enable Key Vault status monitor"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "keyvault_status" { resource "datadog_monitor" "keyvault_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Key Vault is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Key Vault is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -30,7 +30,7 @@ resource "datadog_monitor" "keyvault_status" {
resource "datadog_monitor" "keyvault_api_result" { resource "datadog_monitor" "keyvault_api_result" {
count = "${var.api_result_enabled == "true" ? 1 : 0}" count = "${var.api_result_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Key Vault API result rate is low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Key Vault API result rate is low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -67,7 +67,7 @@ resource "datadog_monitor" "keyvault_api_result" {
resource "datadog_monitor" "keyvault_api_latency" { resource "datadog_monitor" "keyvault_api_latency" {
count = "${var.api_latency_enabled == "true" ? 1 : 0}" count = "${var.api_latency_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Key Vault API latency is high {{#is_alert}}{{{comparator}}} {{threshold}}ms ({{value}}ms){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}ms ({{value}}ms){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Key Vault API latency is high {{#is_alert}}{{{comparator}}} {{threshold}}ms ({{value}}ms){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}ms ({{value}}ms){{/is_warning}}"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -29,6 +29,7 @@ Creates DataDog monitors with the following checks:
| filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no | | filter\_tags\_use\_defaults | Use default filter tags convention | string | `"true"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes | | message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable Load Balancer status monitor | string | `"true"` | no | | status\_enabled | Flag to enable Load Balancer status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Load Balancer status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for Load Balancer status monitor | list | `[]` | no |
| status\_message | Custom message for Load Balancer status monitor | string | `""` | no | | status\_message | Custom message for Load Balancer status monitor | string | `""` | no |

View File

@ -32,6 +32,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
# Azure Load Balancer specific variables # Azure Load Balancer specific variables
variable "status_enabled" { variable "status_enabled" {
description = "Flag to enable Load Balancer status monitor" description = "Flag to enable Load Balancer status monitor"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "loadbalancer_status" { resource "datadog_monitor" "loadbalancer_status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Load Balancer is unreachable" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Load Balancer is unreachable"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -65,6 +65,7 @@ Creates DataDog monitors with the following checks:
| memory\_usage\_timeframe | Monitor timeframe for Mysql memory [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no | | memory\_usage\_timeframe | Monitor timeframe for Mysql memory [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes | | message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "mysql_cpu_usage" { resource "datadog_monitor" "mysql_cpu_usage" {
count = "${var.cpu_usage_enabled == "true" ? 1 : 0}" count = "${var.cpu_usage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Mysql Server CPU usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Mysql Server CPU usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_usage_message, var.message)}" message = "${coalesce(var.cpu_usage_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -33,7 +33,7 @@ resource "datadog_monitor" "mysql_cpu_usage" {
resource "datadog_monitor" "mysql_free_storage" { resource "datadog_monitor" "mysql_free_storage" {
count = "${var.free_storage_enabled == "true" ? 1 : 0}" count = "${var.free_storage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Mysql Server storage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Mysql Server storage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.free_storage_message, var.message)}" message = "${coalesce(var.free_storage_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -66,7 +66,7 @@ resource "datadog_monitor" "mysql_free_storage" {
resource "datadog_monitor" "mysql_io_consumption" { resource "datadog_monitor" "mysql_io_consumption" {
count = "${var.io_consumption_enabled == "true" ? 1 : 0}" count = "${var.io_consumption_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Mysql Server IO consumption {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Mysql Server IO consumption {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.io_consumption_message, var.message)}" message = "${coalesce(var.io_consumption_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -99,7 +99,7 @@ resource "datadog_monitor" "mysql_io_consumption" {
resource "datadog_monitor" "mysql_memory_usage" { resource "datadog_monitor" "mysql_memory_usage" {
count = "${var.memory_usage_enabled == "true" ? 1 : 0}" count = "${var.memory_usage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Mysql Server memory usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Mysql Server memory usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.memory_usage_message, var.message)}" message = "${coalesce(var.memory_usage_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -70,6 +70,7 @@ Creates DataDog monitors with the following checks:
| no\_connection\_silenced | Groups to mute for PostgreSQL no connection monitor | map | `{}` | no | | no\_connection\_silenced | Groups to mute for PostgreSQL no connection monitor | map | `{}` | no |
| no\_connection\_time\_aggregator | Monitor aggregator for PostgreSQL no connection [available values: min, max or avg] | string | `"min"` | no | | no\_connection\_time\_aggregator | Monitor aggregator for PostgreSQL no connection [available values: min, max or avg] | string | `"min"` | no |
| no\_connection\_timeframe | Monitor timeframe for PostgreSQL no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | no\_connection\_timeframe | Monitor timeframe for PostgreSQL no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
## Outputs ## Outputs

View File

@ -15,6 +15,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "message" { variable "message" {
description = "Message sent when an alert is triggered" description = "Message sent when an alert is triggered"
} }

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "postgresql_cpu_usage" { resource "datadog_monitor" "postgresql_cpu_usage" {
count = "${var.cpu_usage_enabled == "true" ? 1 : 0}" count = "${var.cpu_usage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Postgresql Server CPU usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Postgresql Server CPU usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_usage_message, var.message)}" message = "${coalesce(var.cpu_usage_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -33,7 +33,7 @@ resource "datadog_monitor" "postgresql_cpu_usage" {
resource "datadog_monitor" "postgresql_no_connection" { resource "datadog_monitor" "postgresql_no_connection" {
count = "${var.no_connection_enabled == "true" ? 1 : 0}" count = "${var.no_connection_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Postgresql Server has no connection" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Postgresql Server has no connection"
message = "${coalesce(var.no_connection_message, var.message)}" message = "${coalesce(var.no_connection_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -61,7 +61,7 @@ resource "datadog_monitor" "postgresql_no_connection" {
resource "datadog_monitor" "postgresql_free_storage" { resource "datadog_monitor" "postgresql_free_storage" {
count = "${var.free_storage_enabled == "true" ? 1 : 0}" count = "${var.free_storage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Postgresql Server storage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Postgresql Server storage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.free_storage_message, var.message)}" message = "${coalesce(var.free_storage_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -94,7 +94,7 @@ resource "datadog_monitor" "postgresql_free_storage" {
resource "datadog_monitor" "postgresql_io_consumption" { resource "datadog_monitor" "postgresql_io_consumption" {
count = "${var.io_consumption_enabled == "true" ? 1 : 0}" count = "${var.io_consumption_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Postgresql Server IO consumption {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Postgresql Server IO consumption {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.io_consumption_message, var.message)}" message = "${coalesce(var.io_consumption_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -127,7 +127,7 @@ resource "datadog_monitor" "postgresql_io_consumption" {
resource "datadog_monitor" "postgresql_memory_usage" { resource "datadog_monitor" "postgresql_memory_usage" {
count = "${var.memory_usage_enabled == "true" ? 1 : 0}" count = "${var.memory_usage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Postgresql Server memory usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Postgresql Server memory usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.memory_usage_message, var.message)}" message = "${coalesce(var.memory_usage_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -48,6 +48,7 @@ Creates DataDog monitors with the following checks:
| percent\_processor\_time\_threshold\_warning | Processor time percent (warning threshold) | string | `"60"` | no | | percent\_processor\_time\_threshold\_warning | Processor time percent (warning threshold) | string | `"60"` | no |
| percent\_processor\_time\_time\_aggregator | Monitor aggregator for Redis processor [available values: min, max or avg] | string | `"min"` | no | | percent\_processor\_time\_time\_aggregator | Monitor aggregator for Redis processor [available values: min, max or avg] | string | `"min"` | no |
| percent\_processor\_time\_timeframe | Monitor timeframe for Redis processor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | percent\_processor\_time\_timeframe | Monitor timeframe for Redis processor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| server\_load\_rate\_enabled | Flag to enable Redis server load monitor | string | `"true"` | no | | server\_load\_rate\_enabled | Flag to enable Redis server load monitor | string | `"true"` | no |
| server\_load\_rate\_extra\_tags | Extra tags for Redis server load monitor | list | `[]` | no | | server\_load\_rate\_extra\_tags | Extra tags for Redis server load monitor | list | `[]` | no |
| server\_load\_rate\_message | Custom message for Redis server load monitor | string | `""` | no | | server\_load\_rate\_message | Custom message for Redis server load monitor | string | `""` | no |

View File

@ -19,6 +19,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "filter_tags_use_defaults" { variable "filter_tags_use_defaults" {
description = "Use default filter tags convention" description = "Use default filter tags convention"
default = "true" default = "true"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "status" { resource "datadog_monitor" "status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Redis {{name}} is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Redis {{name}} is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -28,7 +28,7 @@ resource "datadog_monitor" "status" {
resource "datadog_monitor" "evictedkeys" { resource "datadog_monitor" "evictedkeys" {
count = "${var.evictedkeys_limit_enabled == "true" ? 1 : 0}" count = "${var.evictedkeys_limit_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Redis too many evictedkeys {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Redis too many evictedkeys {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.evictedkeys_limit_message, var.message)}" message = "${coalesce(var.evictedkeys_limit_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -61,7 +61,7 @@ resource "datadog_monitor" "evictedkeys" {
resource "datadog_monitor" "percent_processor_time" { resource "datadog_monitor" "percent_processor_time" {
count = "${var.percent_processor_time_enabled == "true" ? 1 : 0}" count = "${var.percent_processor_time_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Redis processor time too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Redis processor time too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.percent_processor_time_message, var.message)}" message = "${coalesce(var.percent_processor_time_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -94,7 +94,7 @@ resource "datadog_monitor" "percent_processor_time" {
resource "datadog_monitor" "server_load" { resource "datadog_monitor" "server_load" {
count = "${var.server_load_rate_enabled == "true" ? 1 : 0}" count = "${var.server_load_rate_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Redis server load too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Redis server load too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.server_load_rate_message, var.message)}" message = "${coalesce(var.server_load_rate_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -46,6 +46,7 @@ Creates DataDog monitors with the following checks:
| memory\_percentage\_timeframe | Monitor timeframe for serverfarms memory_percentage [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | memory\_percentage\_timeframe | Monitor timeframe for serverfarms memory_percentage [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when a serverfarms monitor is triggered | string | n/a | yes | | message | Message sent when a serverfarms monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no | | new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable the serverfarms status monitor | string | `"true"` | no | | status\_enabled | Flag to enable the serverfarms status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for serverfarms status monitor | list | `[]` | no | | status\_extra\_tags | Extra tags for serverfarms status monitor | list | `[]` | no |
| status\_message | Custom message for serverfarm status monitor | string | `""` | no | | status\_message | Custom message for serverfarm status monitor | string | `""` | no |

View File

@ -19,6 +19,11 @@ variable "new_host_delay" {
default = 300 default = 300
} }
variable "prefix_slug" {
description = "Prefix string to prepend between brackets on every monitors names"
default = ""
}
variable "filter_tags_use_defaults" { variable "filter_tags_use_defaults" {
description = "Use default filter tags convention" description = "Use default filter tags convention"
default = "true" default = "true"

View File

@ -1,6 +1,6 @@
resource "datadog_monitor" "status" { resource "datadog_monitor" "status" {
count = "${var.status_enabled == "true" ? 1 : 0}" count = "${var.status_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Serverfarm is down" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Serverfarm is down"
message = "${coalesce(var.status_message, var.message)}" message = "${coalesce(var.status_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -28,7 +28,7 @@ resource "datadog_monitor" "status" {
resource "datadog_monitor" "cpu_percentage" { resource "datadog_monitor" "cpu_percentage" {
count = "${var.cpu_percentage_enabled == "true" ? 1 : 0}" count = "${var.cpu_percentage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Serverfarm CPU percentage is too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Serverfarm CPU percentage is too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_percentage_message, var.message)}" message = "${coalesce(var.cpu_percentage_message, var.message)}"
query = <<EOQ query = <<EOQ
@ -61,7 +61,7 @@ resource "datadog_monitor" "cpu_percentage" {
resource "datadog_monitor" "memory_percentage" { resource "datadog_monitor" "memory_percentage" {
count = "${var.memory_percentage_enabled == "true" ? 1 : 0}" count = "${var.memory_percentage_enabled == "true" ? 1 : 0}"
name = "[${var.environment}] Serverfarm memory percentage is too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}" name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Serverfarm memory percentage is too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.memory_percentage_message, var.message)}" message = "${coalesce(var.memory_percentage_message, var.message)}"
query = <<EOQ query = <<EOQ

View File

@ -37,6 +37,7 @@ Creates DataDog monitors with the following checks:
| no\_active\_connections\_silenced | Groups to mute for Service Bus status monitor | map | `{}` | no | | no\_active\_connections\_silenced | Groups to mute for Service Bus status monitor | map | `{}` | no |
| no\_active\_connections\_time\_aggregator | Monitor aggregator for Service Bus status [available values: min, max or avg] | string | `"max"` | no | | no\_active\_connections\_time\_aggregator | Monitor aggregator for Service Bus status [available values: min, max or avg] | string | `"max"` | no |
| no\_active\_connections\_timeframe | Monitor timeframe for Service Bus status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no | | no\_active\_connections\_timeframe | Monitor timeframe for Service Bus status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| server\_errors\_enabled | Flag to enable Service Bus server errors monitor | string | `"true"` | no | | server\_errors\_enabled | Flag to enable Service Bus server errors monitor | string | `"true"` | no |
| server\_errors\_message | Custom message for Service Bus server errors monitor | string | `""` | no | | server\_errors\_message | Custom message for Service Bus server errors monitor | string | `""` | no |
| server\_errors\_silenced | Groups to mute for Service Bus server errors monitor | map | `{}` | no | | server\_errors\_silenced | Groups to mute for Service Bus server errors monitor | map | `{}` | no |

Some files were not shown because too many files have changed in this diff Show More