Merge branch 'MON-479_remove_silenced' into 'master'

MON-479 Upgrade datadog terraform provider to v2

Closes MON-479

See merge request claranet/pt-monitoring/projects/datadog/terraform/monitors!72
This commit is contained in:
Quentin Manfroi 2019-07-02 15:15:38 +02:00
commit d82101c06c
166 changed files with 183 additions and 2250 deletions

View File

@ -27,7 +27,7 @@ Before importing some modules, you must define the DataDog provider in your `mai
```
provider "datadog" {
version = "1.7.0" # if you use version before 1.0.4 you will have diff on monitor type
version = "2.0.2"
api_key = "${var.datadog_api_key}"
app_key = "${var.datadog_app_key}"

View File

@ -26,7 +26,6 @@ Creates DataDog monitors with the following checks:
| ark\_schedules\_extra\_tags | Extra tags for Ark schedules monitor | list | `[]` | no |
| ark\_schedules\_monitor\_message | Custom message for Ark schedules monitor | string | `""` | no |
| ark\_schedules\_monitor\_no\_data\_timeframe | No data timeframe in minutes | string | `"1440"` | no |
| ark\_schedules\_monitor\_silenced | Groups to mute for Ark schedules monitor | map | `{}` | no |
| ark\_schedules\_monitor\_timeframe | Monitor timeframe for Ark schedules monitor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_1d"` | no |
| environment | Architecture environment | string | n/a | yes |
| evaluation\_delay | Delay in seconds for the metric evaluation | string | `"15"` | no |

View File

@ -52,12 +52,6 @@ variable "ark_schedules_monitor_timeframe" {
default = "last_1d"
}
variable "ark_schedules_monitor_silenced" {
description = "Groups to mute for Ark schedules monitor"
type = "map"
default = {}
}
variable "ark_schedules_enabled" {
description = "Flag to enable Ark schedules monitor"
type = "string"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "ark_schedules_monitor" {
count = "${var.ark_schedules_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Ark backup failed"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.ark_schedules_monitor_message, var.message)}"
query = <<EOQ
@ -25,6 +25,5 @@ resource "datadog_monitor" "ark_schedules_monitor" {
locked = false
require_full_window = false
silenced = "${var.ark_schedules_monitor_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:prometheus", "resource:ark", "team:claranet", "created-by:terraform", "${var.ark_schedules_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:prometheus", "resource:ark", "team:claranet", "created-by:terraform", "${var.ark_schedules_extra_tags}"]
}

View File

@ -25,7 +25,6 @@ Creates DataDog monitors with the following checks:
| apiserver\_enabled | Flag to enable API server monitor | string | `"true"` | no |
| apiserver\_extra\_tags | Extra tags for API server monitor | list | `[]` | no |
| apiserver\_message | Custom message for API server monitor | string | `""` | no |
| apiserver\_silenced | Groups to mute for API server monitor | map | `{}` | no |
| apiserver\_threshold\_warning | API server monitor (warning threshold) | string | `"3"` | no |
| environment | Architecture environment | string | n/a | yes |
| evaluation\_delay | Delay in seconds for the metric evaluation | string | `"15"` | no |

View File

@ -40,12 +40,6 @@ variable "prefix_slug" {
# Datadog monitors variables
variable "apiserver_silenced" {
description = "Groups to mute for API server monitor"
type = "map"
default = {}
}
variable "apiserver_enabled" {
description = "Flag to enable API server monitor"
type = "string"

View File

@ -23,7 +23,5 @@ resource "datadog_monitor" "apiserver" {
locked = false
require_full_window = true
silenced = "${var.apiserver_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.apiserver_extra_tags}"]
}

View File

@ -32,7 +32,6 @@ Creates DataDog monitors with the following checks:
| ingress\_4xx\_enabled | Flag to enable Ingress 4xx errors monitor | string | `"true"` | no |
| ingress\_4xx\_extra\_tags | Extra tags for Ingress 4xx errors monitor | list | `[]` | no |
| ingress\_4xx\_message | Message sent when an alert is triggered | string | `""` | no |
| ingress\_4xx\_silenced | Groups to mute for Ingress 4xx errors monitor | map | `{}` | no |
| ingress\_4xx\_threshold\_critical | 4xx critical threshold in percentage | string | `"40"` | no |
| ingress\_4xx\_threshold\_warning | 4xx warning threshold in percentage | string | `"20"` | no |
| ingress\_4xx\_time\_aggregator | Monitor aggregator for Ingress 4xx errors [available values: min, max or avg] | string | `"min"` | no |
@ -40,7 +39,6 @@ Creates DataDog monitors with the following checks:
| ingress\_5xx\_enabled | Flag to enable Ingress 5xx errors monitor | string | `"true"` | no |
| ingress\_5xx\_extra\_tags | Extra tags for Ingress 5xx errors monitor | list | `[]` | no |
| ingress\_5xx\_message | Message sent when an alert is triggered | string | `""` | no |
| ingress\_5xx\_silenced | Groups to mute for Ingress 5xx errors monitor | map | `{}` | no |
| ingress\_5xx\_threshold\_critical | 5xx critical threshold in percentage | string | `"20"` | no |
| ingress\_5xx\_threshold\_warning | 5xx warning threshold in percentage | string | `"10"` | no |
| ingress\_5xx\_time\_aggregator | Monitor aggregator for Ingress 5xx errors [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
#Ingress
variable "ingress_5xx_silenced" {
description = "Groups to mute for Ingress 5xx errors monitor"
type = "map"
default = {}
}
variable "ingress_5xx_enabled" {
description = "Flag to enable Ingress 5xx errors monitor"
@ -87,12 +82,6 @@ variable "ingress_5xx_threshold_warning" {
description = "5xx warning threshold in percentage"
}
variable "ingress_4xx_silenced" {
description = "Groups to mute for Ingress 4xx errors monitor"
type = "map"
default = {}
}
variable "ingress_4xx_enabled" {
description = "Flag to enable Ingress 4xx errors monitor"
type = "string"

View File

@ -10,7 +10,7 @@ resource "datadog_monitor" "nginx_ingress_too_many_5xx" {
* 100, 0) > ${var.ingress_5xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.ingress_5xx_threshold_warning}"
@ -27,8 +27,6 @@ resource "datadog_monitor" "nginx_ingress_too_many_5xx" {
locked = false
require_full_window = true
silenced = "${var.ingress_5xx_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:prometheus", "resource:nginx-ingress-controller", "team:claranet", "created-by:terraform", "${var.ingress_5xx_extra_tags}"]
}
@ -44,7 +42,7 @@ resource "datadog_monitor" "nginx_ingress_too_many_4xx" {
* 100, 0) > ${var.ingress_4xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.ingress_4xx_threshold_warning}"
@ -61,7 +59,5 @@ resource "datadog_monitor" "nginx_ingress_too_many_4xx" {
locked = false
require_full_window = true
silenced = "${var.ingress_4xx_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:prometheus", "resource:nginx-ingress-controller", "team:claranet", "created-by:terraform", "${var.ingress_4xx_extra_tags}"]
}

View File

@ -34,12 +34,10 @@ Creates DataDog monitors with the following checks:
| disk\_out\_enabled | Flag to enable Out of disk monitor | string | `"true"` | no |
| disk\_out\_extra\_tags | Extra tags for Out of disk monitor | list | `[]` | no |
| disk\_out\_message | Custom message for Out of disk monitor | string | `""` | no |
| disk\_out\_silenced | Groups to mute for Out of disk monitor | map | `{}` | no |
| disk\_out\_threshold\_warning | Out of disk monitor (warning threshold) | string | `"3"` | no |
| disk\_pressure\_enabled | Flag to enable Disk pressure monitor | string | `"true"` | no |
| disk\_pressure\_extra\_tags | Extra tags for Disk pressure monitor | list | `[]` | no |
| disk\_pressure\_message | Custom message for Disk pressure monitor | string | `""` | no |
| disk\_pressure\_silenced | Groups to mute for Disk pressure monitor | map | `{}` | no |
| disk\_pressure\_threshold\_warning | Disk pressure monitor (warning threshold) | string | `"3"` | no |
| environment | Architecture environment | string | n/a | yes |
| evaluation\_delay | Delay in seconds for the metric evaluation | string | `"15"` | no |
@ -49,43 +47,36 @@ Creates DataDog monitors with the following checks:
| kubelet\_ping\_enabled | Flag to enable Kubelet ping monitor | string | `"true"` | no |
| kubelet\_ping\_extra\_tags | Extra tags for Kubelet ping monitor | list | `[]` | no |
| kubelet\_ping\_message | Custom message for Kubelet ping monitor | string | `""` | no |
| kubelet\_ping\_silenced | Groups to mute for Kubelet ping monitor | map | `{}` | no |
| kubelet\_ping\_threshold\_warning | Kubelet ping monitor (warning threshold) | string | `"3"` | no |
| kubelet\_syncloop\_enabled | Flag to enable Kubelet sync loop monitor | string | `"true"` | no |
| kubelet\_syncloop\_extra\_tags | Extra tags for Kubelet sync loop monitor | list | `[]` | no |
| kubelet\_syncloop\_message | Custom message for Kubelet sync loop monitor | string | `""` | no |
| kubelet\_syncloop\_silenced | Groups to mute for Kubelet sync loop monitor | map | `{}` | no |
| kubelet\_syncloop\_threshold\_warning | Kubelet sync loop monitor (warning threshold) | string | `"3"` | no |
| memory\_pressure\_enabled | Flag to enable Memory pressure monitor | string | `"true"` | no |
| memory\_pressure\_extra\_tags | Extra tags for Memory pressure monitor | list | `[]` | no |
| memory\_pressure\_message | Custom message for Memory pressure monitor | string | `""` | no |
| memory\_pressure\_silenced | Groups to mute for Memory pressure monitor | map | `{}` | no |
| memory\_pressure\_threshold\_warning | Memory pressure monitor (warning threshold) | string | `"3"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
| node\_unschedulable\_enabled | Flag to enable node unschedulable monitor | string | `"true"` | no |
| node\_unschedulable\_extra\_tags | Extra tags for node unschedulable monitor | list | `[]` | no |
| node\_unschedulable\_message | Custom message for node unschedulable monitor | string | `""` | no |
| node\_unschedulable\_silenced | Groups to mute for node unschedulable monitor | map | `{}` | no |
| node\_unschedulable\_time\_aggregator | Monitor aggregator for node unschedulable [available values: min, max or avg] | string | `"min"` | no |
| node\_unschedulable\_timeframe | Monitor timeframe for node unschedulable [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_1h"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| ready\_enabled | Flag to enable Node ready monitor | string | `"true"` | no |
| ready\_extra\_tags | Extra tags for Node ready monitor | list | `[]` | no |
| ready\_message | Custom message for Node ready monitor | string | `""` | no |
| ready\_silenced | Groups to mute for Node ready monitor | map | `{}` | no |
| ready\_threshold\_warning | Node ready monitor (warning threshold) | string | `"3"` | no |
| unregister\_net\_device\_enabled | Flag to enable Unregister net device monitor | string | `"true"` | no |
| unregister\_net\_device\_extra\_tags | Extra tags for Unregister net device monitor | list | `[]` | no |
| unregister\_net\_device\_message | Custom message for Unregister net device monitor | string | `""` | no |
| unregister\_net\_device\_silenced | Groups to mute for Unregister net device monitor | map | `{}` | no |
| unregister\_net\_device\_threshold\_critical | Unregister net device critical threshold | string | `"3"` | no |
| unregister\_net\_device\_time\_aggregator | Monitor aggregator for Unregister net device [available values: min, max or avg] | string | `"min"` | no |
| unregister\_net\_device\_timeframe | Monitor timeframe for Unregister net device [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"15m"` | no |
| volume\_inodes\_enabled | Flag to enable Volume inodes monitor | string | `"true"` | no |
| volume\_inodes\_extra\_tags | Extra tags for Volume inodes monitor | list | `[]` | no |
| volume\_inodes\_message | Custom message for Volume inodes monitor | string | `""` | no |
| volume\_inodes\_silenced | Groups to mute for Volume inodes monitor | map | `{}` | no |
| volume\_inodes\_threshold\_critical | Volume inodes critical threshold | string | `"95"` | no |
| volume\_inodes\_threshold\_warning | Volume inodes warning threshold | string | `"90"` | no |
| volume\_inodes\_time\_aggregator | Monitor aggregator for Volume inodes [available values: min, max or avg] | string | `"min"` | no |
@ -93,7 +84,6 @@ Creates DataDog monitors with the following checks:
| volume\_space\_enabled | Flag to enable Volume space monitor | string | `"true"` | no |
| volume\_space\_extra\_tags | Extra tags for Volume space monitor | list | `[]` | no |
| volume\_space\_message | Custom message for Volume space monitor | string | `""` | no |
| volume\_space\_silenced | Groups to mute for Volume space monitor | map | `{}` | no |
| volume\_space\_threshold\_critical | Volume space critical threshold | string | `"95"` | no |
| volume\_space\_threshold\_warning | Volume space warning threshold | string | `"90"` | no |
| volume\_space\_time\_aggregator | Monitor aggregator for Volume space [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,12 +40,6 @@ variable "prefix_slug" {
# Datadog monitors variables
variable "disk_pressure_silenced" {
description = "Groups to mute for Disk pressure monitor"
type = "map"
default = {}
}
variable "disk_pressure_enabled" {
description = "Flag to enable Disk pressure monitor"
type = "string"
@ -70,12 +64,6 @@ variable "disk_pressure_threshold_warning" {
default = 3
}
variable "disk_out_silenced" {
description = "Groups to mute for Out of disk monitor"
type = "map"
default = {}
}
variable "disk_out_enabled" {
description = "Flag to enable Out of disk monitor"
type = "string"
@ -100,12 +88,6 @@ variable "disk_out_threshold_warning" {
default = 3
}
variable "memory_pressure_silenced" {
description = "Groups to mute for Memory pressure monitor"
type = "map"
default = {}
}
variable "memory_pressure_enabled" {
description = "Flag to enable Memory pressure monitor"
type = "string"
@ -130,12 +112,6 @@ variable "memory_pressure_threshold_warning" {
default = 3
}
variable "ready_silenced" {
description = "Groups to mute for Node ready monitor"
type = "map"
default = {}
}
variable "ready_enabled" {
description = "Flag to enable Node ready monitor"
type = "string"
@ -160,12 +136,6 @@ variable "ready_threshold_warning" {
default = 3
}
variable "kubelet_ping_silenced" {
description = "Groups to mute for Kubelet ping monitor"
type = "map"
default = {}
}
variable "kubelet_ping_enabled" {
description = "Flag to enable Kubelet ping monitor"
type = "string"
@ -190,12 +160,6 @@ variable "kubelet_ping_threshold_warning" {
default = 3
}
variable "kubelet_syncloop_silenced" {
description = "Groups to mute for Kubelet sync loop monitor"
type = "map"
default = {}
}
variable "kubelet_syncloop_enabled" {
description = "Flag to enable Kubelet sync loop monitor"
type = "string"
@ -220,12 +184,6 @@ variable "kubelet_syncloop_threshold_warning" {
default = 3
}
variable "unregister_net_device_silenced" {
description = "Groups to mute for Unregister net device monitor"
type = "map"
default = {}
}
variable "unregister_net_device_enabled" {
description = "Flag to enable Unregister net device monitor"
type = "string"
@ -261,12 +219,6 @@ variable "unregister_net_device_threshold_critical" {
description = "Unregister net device critical threshold"
}
variable "node_unschedulable_silenced" {
description = "Groups to mute for node unschedulable monitor"
type = "map"
default = {}
}
variable "node_unschedulable_enabled" {
description = "Flag to enable node unschedulable monitor"
type = "string"
@ -297,12 +249,6 @@ variable "node_unschedulable_timeframe" {
default = "last_1h"
}
variable "volume_space_silenced" {
description = "Groups to mute for Volume space monitor"
type = "map"
default = {}
}
variable "volume_space_enabled" {
description = "Flag to enable Volume space monitor"
type = "string"
@ -343,12 +289,6 @@ variable "volume_space_threshold_warning" {
description = "Volume space warning threshold"
}
variable "volume_inodes_silenced" {
description = "Groups to mute for Volume inodes monitor"
type = "map"
default = {}
}
variable "volume_inodes_enabled" {
description = "Flag to enable Volume inodes monitor"
type = "string"

View File

@ -23,8 +23,6 @@ resource "datadog_monitor" "disk_pressure" {
locked = false
require_full_window = true
silenced = "${var.disk_pressure_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.disk_pressure_extra_tags}"]
}
@ -53,8 +51,6 @@ resource "datadog_monitor" "disk_out" {
locked = false
require_full_window = true
silenced = "${var.disk_out_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.disk_out_extra_tags}"]
}
@ -83,8 +79,6 @@ resource "datadog_monitor" "memory_pressure" {
locked = false
require_full_window = true
silenced = "${var.memory_pressure_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.memory_pressure_extra_tags}"]
}
@ -113,8 +107,6 @@ resource "datadog_monitor" "ready" {
locked = false
require_full_window = true
silenced = "${var.ready_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.ready_extra_tags}"]
}
@ -143,8 +135,6 @@ resource "datadog_monitor" "kubelet_ping" {
locked = false
require_full_window = true
silenced = "${var.kubelet_ping_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.kubelet_ping_extra_tags}"]
}
@ -173,8 +163,6 @@ resource "datadog_monitor" "kubelet_syncloop" {
locked = false
require_full_window = true
silenced = "${var.kubelet_syncloop_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.kubelet_syncloop_extra_tags}"]
}
@ -197,8 +185,7 @@ resource "datadog_monitor" "unregister_net_device" {
include_tags = true
locked = false
silenced = "${var.unregister_net_device_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.unregister_net_device_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.unregister_net_device_extra_tags}"]
}
resource "datadog_monitor" "node_unschedulable" {
@ -228,14 +215,13 @@ resource "datadog_monitor" "node_unschedulable" {
locked = false
require_full_window = true
silenced = "${var.node_unschedulable_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.node_unschedulable_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.node_unschedulable_extra_tags}"]
}
resource "datadog_monitor" "volume_space" {
count = "${var.volume_space_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node volume space usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.volume_space_message, var.message)}"
query = <<EOQ
@ -261,14 +247,13 @@ resource "datadog_monitor" "volume_space" {
locked = false
require_full_window = true
silenced = "${var.volume_space_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.volume_space_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.volume_space_extra_tags}"]
}
resource "datadog_monitor" "volume_inodes" {
count = "${var.volume_inodes_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Node volume inodes usage {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.volume_inodes_message, var.message)}"
query = <<EOQ
@ -294,6 +279,5 @@ resource "datadog_monitor" "volume_inodes" {
locked = false
require_full_window = true
silenced = "${var.volume_inodes_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.volume_inodes_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-node", "team:claranet", "created-by:terraform", "${var.volume_inodes_extra_tags}"]
}

View File

@ -27,7 +27,6 @@ Creates DataDog monitors with the following checks:
| error\_enabled | Flag to enable Pod errors monitor | string | `"true"` | no |
| error\_extra\_tags | Extra tags for Pod errors monitor | list | `[]` | no |
| error\_message | Custom message for Pod errors monitor | string | `""` | no |
| error\_silenced | Groups to mute for Pod errors monitor | map | `{}` | no |
| error\_threshold\_critical | error critical threshold | string | `"0.5"` | no |
| error\_threshold\_warning | error warning threshold | string | `"0"` | no |
| error\_time\_aggregator | Monitor aggregator for Pod errors [available values: min, max or avg] | string | `"sum"` | no |
@ -41,7 +40,6 @@ Creates DataDog monitors with the following checks:
| pod\_phase\_status\_enabled | Flag to enable Pod phase status monitor | string | `"true"` | no |
| pod\_phase\_status\_extra\_tags | Extra tags for Pod phase status monitor | list | `[]` | no |
| pod\_phase\_status\_message | Custom message for Pod phase status monitor | string | `""` | no |
| pod\_phase\_status\_silenced | Groups to mute for Pod phase status monitor | map | `{}` | no |
| pod\_phase\_status\_time\_aggregator | Monitor aggregator for Pod phase status [available values: min, max or avg] | string | `"max"` | no |
| pod\_phase\_status\_timeframe | Monitor timeframe for Pod phase status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |

View File

@ -40,12 +40,6 @@ variable "prefix_slug" {
# Datadog monitors variables
variable "pod_phase_status_silenced" {
description = "Groups to mute for Pod phase status monitor"
type = "map"
default = {}
}
variable "pod_phase_status_enabled" {
description = "Flag to enable Pod phase status monitor"
type = "string"
@ -76,12 +70,6 @@ variable "pod_phase_status_timeframe" {
default = "last_5m"
}
variable "error_silenced" {
description = "Groups to mute for Pod errors monitor"
type = "map"
default = {}
}
variable "error_enabled" {
description = "Flag to enable Pod errors monitor"
type = "string"

View File

@ -24,14 +24,13 @@ resource "datadog_monitor" "pod_phase_status" {
locked = false
require_full_window = true
silenced = "${var.pod_phase_status_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-pod", "team:claranet", "created-by:terraform", "${var.pod_phase_status_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-pod", "team:claranet", "created-by:terraform", "${var.pod_phase_status_extra_tags}"]
}
resource "datadog_monitor" "error" {
count = "${var.error_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Pod waiting errors"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.error_message, var.message)}"
query = <<EOQ
@ -56,6 +55,5 @@ resource "datadog_monitor" "error" {
locked = false
require_full_window = true
silenced = "${var.error_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-pod", "team:claranet", "created-by:terraform", "${var.error_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-pod", "team:claranet", "created-by:terraform", "${var.error_extra_tags}"]
}

View File

@ -29,7 +29,6 @@ Creates DataDog monitors with the following checks:
| cronjob\_enabled | Flag to enable Cronjob monitor | string | `"true"` | no |
| cronjob\_extra\_tags | Extra tags for Cronjob monitor | list | `[]` | no |
| cronjob\_message | Custom message for Cronjob monitor | string | `""` | no |
| cronjob\_silenced | Groups to mute for Cronjob monitor | map | `{}` | no |
| cronjob\_threshold\_warning | Cronjob monitor (warning threshold) | string | `"3"` | no |
| environment | Architecture environment | string | n/a | yes |
| evaluation\_delay | Delay in seconds for the metric evaluation | string | `"15"` | no |
@ -39,7 +38,6 @@ Creates DataDog monitors with the following checks:
| job\_enabled | Flag to enable Job monitor | string | `"true"` | no |
| job\_extra\_tags | Extra tags for Job monitor | list | `[]` | no |
| job\_message | Custom message for Job monitor | string | `""` | no |
| job\_silenced | Groups to mute for Job monitor | map | `{}` | no |
| job\_threshold\_warning | Job monitor (warning threshold) | string | `"3"` | no |
| message | Message sent when a monitor is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |
@ -47,21 +45,18 @@ Creates DataDog monitors with the following checks:
| replica\_available\_enabled | Flag to enable Available replica monitor | string | `"true"` | no |
| replica\_available\_extra\_tags | Extra tags for Available replica monitor | list | `[]` | no |
| replica\_available\_message | Custom message for Available replica monitor | string | `""` | no |
| replica\_available\_silenced | Groups to mute for Available replica monitor | map | `{}` | no |
| replica\_available\_threshold\_critical | Available replica critical threshold | string | `"1"` | no |
| replica\_available\_time\_aggregator | Monitor aggregator for Available replica [available values: min, max or avg] | string | `"max"` | no |
| replica\_available\_timeframe | Monitor timeframe for Available replica [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| replica\_current\_enabled | Flag to enable Current replica monitor | string | `"true"` | no |
| replica\_current\_extra\_tags | Extra tags for Current replica monitor | list | `[]` | no |
| replica\_current\_message | Custom message for Current replica monitor | string | `""` | no |
| replica\_current\_silenced | Groups to mute for Current replica monitor | map | `{}` | no |
| replica\_current\_threshold\_critical | Current replica critical threshold | string | `"1"` | no |
| replica\_current\_time\_aggregator | Monitor aggregator for Current replica [available values: min, max or avg] | string | `"max"` | no |
| replica\_current\_timeframe | Monitor timeframe for Current replica [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| replica\_ready\_enabled | Flag to enable Ready replica monitor | string | `"true"` | no |
| replica\_ready\_extra\_tags | Extra tags for Ready replica monitor | list | `[]` | no |
| replica\_ready\_message | Custom message for Ready replica monitor | string | `""` | no |
| replica\_ready\_silenced | Groups to mute for Ready replica monitor | map | `{}` | no |
| replica\_ready\_threshold\_critical | Ready replica critical threshold | string | `"1"` | no |
| replica\_ready\_time\_aggregator | Monitor aggregator for Ready replica [available values: min, max or avg] | string | `"max"` | no |
| replica\_ready\_timeframe | Monitor timeframe for Ready replica [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -40,12 +40,6 @@ variable "prefix_slug" {
# Datadog monitors variables
variable "job_silenced" {
description = "Groups to mute for Job monitor"
type = "map"
default = {}
}
variable "job_enabled" {
description = "Flag to enable Job monitor"
type = "string"
@ -70,12 +64,6 @@ variable "job_threshold_warning" {
default = 3
}
variable "cronjob_silenced" {
description = "Groups to mute for Cronjob monitor"
type = "map"
default = {}
}
variable "cronjob_enabled" {
description = "Flag to enable Cronjob monitor"
type = "string"
@ -100,12 +88,6 @@ variable "cronjob_threshold_warning" {
default = 3
}
variable "replica_available_silenced" {
description = "Groups to mute for Available replica monitor"
type = "map"
default = {}
}
variable "replica_available_enabled" {
description = "Flag to enable Available replica monitor"
type = "string"
@ -141,12 +123,6 @@ variable "replica_available_threshold_critical" {
description = "Available replica critical threshold"
}
variable "replica_ready_silenced" {
description = "Groups to mute for Ready replica monitor"
type = "map"
default = {}
}
variable "replica_ready_enabled" {
description = "Flag to enable Ready replica monitor"
type = "string"
@ -182,12 +158,6 @@ variable "replica_ready_threshold_critical" {
description = "Ready replica critical threshold"
}
variable "replica_current_silenced" {
description = "Groups to mute for Current replica monitor"
type = "map"
default = {}
}
variable "replica_current_enabled" {
description = "Flag to enable Current replica monitor"
type = "string"

View File

@ -23,8 +23,6 @@ resource "datadog_monitor" "job" {
locked = false
require_full_window = true
silenced = "${var.job_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.job_extra_tags}"]
}
@ -53,15 +51,13 @@ resource "datadog_monitor" "cronjob" {
locked = false
require_full_window = true
silenced = "${var.cronjob_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.cronjob_extra_tags}"]
}
resource "datadog_monitor" "replica_available" {
count = "${var.replica_available_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Available replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.replica_available_message, var.message)}"
query = <<EOQ
@ -86,14 +82,13 @@ resource "datadog_monitor" "replica_available" {
locked = false
require_full_window = true
silenced = "${var.replica_available_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_available_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_available_extra_tags}"]
}
resource "datadog_monitor" "replica_ready" {
count = "${var.replica_ready_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Ready replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.replica_ready_message, var.message)}"
query = <<EOQ
@ -118,14 +113,13 @@ resource "datadog_monitor" "replica_ready" {
locked = false
require_full_window = true
silenced = "${var.replica_ready_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_ready_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_ready_extra_tags}"]
}
resource "datadog_monitor" "replica_current" {
count = "${var.replica_current_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kubernetes Current replicas {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.replica_current_message, var.message)}"
query = <<EOQ
@ -150,6 +144,5 @@ resource "datadog_monitor" "replica_current" {
locked = false
require_full_window = true
silenced = "${var.replica_current_silenced}"
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_current_extra_tags}"]
tags = ["env:${var.environment}", "type:caas", "provider:kubernetes", "resource:kubernetes-workload", "team:claranet", "created-by:terraform", "${var.replica_current_extra_tags}"]
}

View File

@ -30,7 +30,6 @@ Creates DataDog monitors with the following checks:
| alb\_no\_healthy\_instances\_enabled | Flag to enable ALB no healthy instances monitor | string | `"true"` | no |
| alb\_no\_healthy\_instances\_extra\_tags | Extra tags for ALB no healthy instances monitor | list | `[]` | no |
| alb\_no\_healthy\_instances\_message | Custom message for ALB no healthy instances monitor | string | `""` | no |
| alb\_no\_healthy\_instances\_silenced | Groups to mute for ALB no healthy instances monitor | map | `{}` | no |
| alb\_no\_healthy\_instances\_time\_aggregator | Monitor aggregator for ALB no healthy instances [available values: min, max or avg] | string | `"min"` | no |
| alb\_no\_healthy\_instances\_timeframe | Monitor timeframe for ALB no healthy instances [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| artificial\_requests\_count | Number of false requests used to mitigate false positive in case of low trafic | string | `"5"` | no |
@ -42,7 +41,6 @@ Creates DataDog monitors with the following checks:
| httpcode\_alb\_4xx\_enabled | Flag to enable ALB httpcode 4xx monitor | string | `"true"` | no |
| httpcode\_alb\_4xx\_extra\_tags | Extra tags for ALB httpcode 4xx monitor | list | `[]` | no |
| httpcode\_alb\_4xx\_message | Custom message for ALB httpcode 4xx monitor | string | `""` | no |
| httpcode\_alb\_4xx\_silenced | Groups to mute for ALB httpcode 4xx monitor | map | `{}` | no |
| httpcode\_alb\_4xx\_threshold\_critical | loadbalancer 4xx critical threshold in percentage | string | `"80"` | no |
| httpcode\_alb\_4xx\_threshold\_warning | loadbalancer 4xx warning threshold in percentage | string | `"60"` | no |
| httpcode\_alb\_4xx\_time\_aggregator | Monitor aggregator for ALB httpcode 4xx [available values: min, max or avg] | string | `"min"` | no |
@ -50,7 +48,6 @@ Creates DataDog monitors with the following checks:
| httpcode\_alb\_5xx\_enabled | Flag to enable ALB httpcode 5xx monitor | string | `"true"` | no |
| httpcode\_alb\_5xx\_extra\_tags | Extra tags for ALB httpcode 5xx monitor | list | `[]` | no |
| httpcode\_alb\_5xx\_message | Custom message for ALB httpcode 5xx monitor | string | `""` | no |
| httpcode\_alb\_5xx\_silenced | Groups to mute for ALB httpcode 5xx monitor | map | `{}` | no |
| httpcode\_alb\_5xx\_threshold\_critical | loadbalancer 5xx critical threshold in percentage | string | `"80"` | no |
| httpcode\_alb\_5xx\_threshold\_warning | loadbalancer 5xx warning threshold in percentage | string | `"60"` | no |
| httpcode\_alb\_5xx\_time\_aggregator | Monitor aggregator for ALB httpcode 5xx [available values: min, max or avg] | string | `"min"` | no |
@ -58,7 +55,6 @@ Creates DataDog monitors with the following checks:
| httpcode\_target\_4xx\_enabled | Flag to enable ALB target httpcode 4xx monitor | string | `"true"` | no |
| httpcode\_target\_4xx\_extra\_tags | Extra tags for ALB target httpcode 4xx monitor | list | `[]` | no |
| httpcode\_target\_4xx\_message | Custom message for ALB target httpcode 4xx monitor | string | `""` | no |
| httpcode\_target\_4xx\_silenced | Groups to mute for ALB target httpcode 4xx monitor | map | `{}` | no |
| httpcode\_target\_4xx\_threshold\_critical | target 4xx critical threshold in percentage | string | `"80"` | no |
| httpcode\_target\_4xx\_threshold\_warning | target 4xx warning threshold in percentage | string | `"60"` | no |
| httpcode\_target\_4xx\_time\_aggregator | Monitor aggregator for ALB target httpcode 4xx [available values: min, max or avg] | string | `"min"` | no |
@ -66,7 +62,6 @@ Creates DataDog monitors with the following checks:
| httpcode\_target\_5xx\_enabled | Flag to enable ALB target httpcode 5xx monitor | string | `"true"` | no |
| httpcode\_target\_5xx\_extra\_tags | Extra tags for ALB target httpcode 5xx monitor | list | `[]` | no |
| httpcode\_target\_5xx\_message | Custom message for ALB target httpcode 5xx monitor | string | `""` | no |
| httpcode\_target\_5xx\_silenced | Groups to mute for ALB target httpcode 5xx monitor | map | `{}` | no |
| httpcode\_target\_5xx\_threshold\_critical | target 5xx critical threshold in percentage | string | `"80"` | no |
| httpcode\_target\_5xx\_threshold\_warning | target 5xx warning threshold in percentage | string | `"60"` | no |
| httpcode\_target\_5xx\_time\_aggregator | Monitor aggregator for ALB target httpcode 5xx [available values: min, max or avg] | string | `"min"` | no |
@ -74,7 +69,6 @@ Creates DataDog monitors with the following checks:
| latency\_enabled | Flag to enable ALB latency monitor | string | `"true"` | no |
| latency\_extra\_tags | Extra tags for ALB latency monitor | list | `[]` | no |
| latency\_message | Custom message for ALB latency monitor | string | `""` | no |
| latency\_silenced | Groups to mute for ALB latency monitor | map | `{}` | no |
| latency\_threshold\_critical | latency critical threshold in milliseconds | string | `"1000"` | no |
| latency\_threshold\_warning | latency warning threshold in milliseconds | string | `"500"` | no |
| latency\_time\_aggregator | Monitor aggregator for ALB latency [available values: min, max or avg] | string | `"min"` | no |

View File

@ -41,12 +41,6 @@ variable "prefix_slug" {
# Datadog monitors variables
variable "alb_no_healthy_instances_silenced" {
description = "Groups to mute for ALB no healthy instances monitor"
type = "map"
default = {}
}
variable "alb_no_healthy_instances_enabled" {
description = "Flag to enable ALB no healthy instances monitor"
type = "string"
@ -77,12 +71,6 @@ variable "alb_no_healthy_instances_timeframe" {
default = "last_5m"
}
variable "latency_silenced" {
description = "Groups to mute for ALB latency monitor"
type = "map"
default = {}
}
variable "latency_enabled" {
description = "Flag to enable ALB latency monitor"
type = "string"
@ -123,12 +111,6 @@ variable "latency_threshold_warning" {
description = "latency warning threshold in milliseconds"
}
variable "httpcode_alb_4xx_silenced" {
description = "Groups to mute for ALB httpcode 4xx monitor"
type = "map"
default = {}
}
variable "httpcode_alb_4xx_enabled" {
description = "Flag to enable ALB httpcode 4xx monitor"
type = "string"
@ -169,12 +151,6 @@ variable "httpcode_alb_4xx_threshold_warning" {
description = "loadbalancer 4xx warning threshold in percentage"
}
variable "httpcode_target_4xx_silenced" {
description = "Groups to mute for ALB target httpcode 4xx monitor"
type = "map"
default = {}
}
variable "httpcode_target_4xx_enabled" {
description = "Flag to enable ALB target httpcode 4xx monitor"
type = "string"
@ -215,12 +191,6 @@ variable "httpcode_target_4xx_threshold_warning" {
description = "target 4xx warning threshold in percentage"
}
variable "httpcode_alb_5xx_silenced" {
description = "Groups to mute for ALB httpcode 5xx monitor"
type = "map"
default = {}
}
variable "httpcode_alb_5xx_enabled" {
description = "Flag to enable ALB httpcode 5xx monitor"
type = "string"
@ -261,12 +231,6 @@ variable "httpcode_alb_5xx_threshold_warning" {
description = "loadbalancer 5xx warning threshold in percentage"
}
variable "httpcode_target_5xx_silenced" {
description = "Groups to mute for ALB target httpcode 5xx monitor"
type = "map"
default = {}
}
variable "httpcode_target_5xx_enabled" {
description = "Flag to enable ALB target httpcode 5xx monitor"
type = "string"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "ALB_no_healthy_instances" {
count = "${var.alb_no_healthy_instances_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB healthy instances {{#is_alert}}is at 0{{/is_alert}}{{#is_warning}}is at {{value}}%{{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.alb_no_healthy_instances_message, var.message)}"
query = <<EOQ
@ -26,15 +26,13 @@ resource "datadog_monitor" "ALB_no_healthy_instances" {
timeout_h = 0
include_tags = true
silenced = "${var.alb_no_healthy_instances_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.alb_no_healthy_instances_extra_tags}"]
}
resource "datadog_monitor" "ALB_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB latency {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.latency_message, var.message)}"
query = <<EOQ
@ -57,15 +55,13 @@ resource "datadog_monitor" "ALB_latency" {
timeout_h = 0
include_tags = true
silenced = "${var.latency_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.latency_extra_tags}"]
}
resource "datadog_monitor" "ALB_httpcode_5xx" {
count = "${var.httpcode_alb_5xx_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.httpcode_alb_5xx_message, var.message)}"
query = <<EOQ
@ -89,15 +85,13 @@ resource "datadog_monitor" "ALB_httpcode_5xx" {
timeout_h = 0
include_tags = true
silenced = "${var.httpcode_alb_5xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.httpcode_alb_5xx_extra_tags}"]
}
resource "datadog_monitor" "ALB_httpcode_4xx" {
count = "${var.httpcode_alb_4xx_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.httpcode_alb_4xx_message, var.message)}"
query = <<EOQ
@ -121,15 +115,13 @@ resource "datadog_monitor" "ALB_httpcode_4xx" {
timeout_h = 0
include_tags = true
silenced = "${var.httpcode_alb_4xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.httpcode_alb_4xx_extra_tags}"]
}
resource "datadog_monitor" "ALB_httpcode_target_5xx" {
count = "${var.httpcode_target_5xx_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB target HTTP code 5xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.httpcode_target_5xx_message, var.message)}"
query = <<EOQ
@ -153,15 +145,13 @@ resource "datadog_monitor" "ALB_httpcode_target_5xx" {
timeout_h = 0
include_tags = true
silenced = "${var.httpcode_target_5xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.httpcode_target_5xx_extra_tags}"]
}
resource "datadog_monitor" "ALB_httpcode_target_4xx" {
count = "${var.httpcode_target_4xx_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ALB target HTTP code 4xx {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.httpcode_target_4xx_message, var.message)}"
query = <<EOQ
@ -185,7 +175,5 @@ resource "datadog_monitor" "ALB_httpcode_target_4xx" {
timeout_h = 0
include_tags = true
silenced = "${var.httpcode_target_4xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:alb", "team:claranet", "created-by:terraform", "${var.httpcode_target_4xx_extra_tags}"]
}

View File

@ -31,7 +31,6 @@ Creates DataDog monitors with the following checks:
| http\_4xx\_requests\_enabled | Flag to enable API Gateway HTTP 4xx requests monitor | string | `"true"` | no |
| http\_4xx\_requests\_extra\_tags | Extra tags for API Gateway HTTP 4xx requests monitor | list | `[]` | no |
| http\_4xx\_requests\_message | Custom message for API Gateway HTTP 4xx requests monitor | string | `""` | no |
| http\_4xx\_requests\_silenced | Groups to mute for API Gateway HTTP 4xx requests monitor | map | `{}` | no |
| http\_4xx\_requests\_threshold\_critical | Maximum critical acceptable percent of 4xx errors | string | `"30"` | no |
| http\_4xx\_requests\_threshold\_warning | Maximum warning acceptable percent of 4xx errors | string | `"15"` | no |
| http\_4xx\_requests\_time\_aggregator | Monitor aggregator for API HTTP 4xx requests [available values: min, max or avg] | string | `"min"` | no |
@ -39,7 +38,6 @@ Creates DataDog monitors with the following checks:
| http\_5xx\_requests\_enabled | Flag to enable API Gateway HTTP 5xx requests monitor | string | `"true"` | no |
| http\_5xx\_requests\_extra\_tags | Extra tags for API Gateway HTTP 5xx requests monitor | list | `[]` | no |
| http\_5xx\_requests\_message | Custom message for API Gateway HTTP 5xx requests monitor | string | `""` | no |
| http\_5xx\_requests\_silenced | Groups to mute for API Gateway HTTP 5xx requests monitor | map | `{}` | no |
| http\_5xx\_requests\_threshold\_critical | Maximum critical acceptable percent of 5xx errors | string | `"20"` | no |
| http\_5xx\_requests\_threshold\_warning | Maximum warning acceptable percent of 5xx errors | string | `"10"` | no |
| http\_5xx\_requests\_time\_aggregator | Monitor aggregator for API HTTP 5xx requests [available values: min, max or avg] | string | `"min"` | no |
@ -47,7 +45,6 @@ Creates DataDog monitors with the following checks:
| latency\_enabled | Flag to enable API Gateway latency monitor | string | `"true"` | no |
| latency\_extra\_tags | Extra tags for API Gateway latency monitor | list | `[]` | no |
| latency\_message | Custom message for API Gateway latency monitor | string | `""` | no |
| latency\_silenced | Groups to mute for API Gateway latency monitor | map | `{}` | no |
| latency\_threshold\_critical | Alerting threshold in milliseconds | string | `"800"` | no |
| latency\_threshold\_warning | Warning threshold in milliseconds | string | `"400"` | no |
| latency\_time\_aggregator | Monitor aggregator for API Gateway latency [available values: min, max or avg] | string | `"min"` | no |

View File

@ -31,12 +31,6 @@ variable "prefix_slug" {
### LATENCY VARIABLES ###
###################################
variable "latency_silenced" {
description = "Groups to mute for API Gateway latency monitor"
type = "map"
default = {}
}
variable "latency_enabled" {
description = "Flag to enable API Gateway latency monitor"
type = "string"
@ -81,12 +75,6 @@ variable "latency_threshold_warning" {
### HTTP 5xx status pages ###
#################################
variable "http_5xx_requests_silenced" {
description = "Groups to mute for API Gateway HTTP 5xx requests monitor"
type = "map"
default = {}
}
variable "http_5xx_requests_enabled" {
description = "Flag to enable API Gateway HTTP 5xx requests monitor"
type = "string"
@ -131,12 +119,6 @@ variable "http_5xx_requests_threshold_warning" {
### HTTP 4xx status pages ###
#################################
variable "http_4xx_requests_silenced" {
description = "Groups to mute for API Gateway HTTP 4xx requests monitor"
type = "map"
default = {}
}
variable "http_4xx_requests_enabled" {
description = "Flag to enable API Gateway HTTP 4xx requests monitor"
type = "string"

View File

@ -2,7 +2,7 @@
resource "datadog_monitor" "API_Gateway_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway latency {{#is_alert}}{{{comparator}}} {{threshold}}ms ({{value}}ms){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}ms ({{value}}ms){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.latency_message, var.message)}"
query = <<EOQ
@ -25,8 +25,6 @@ resource "datadog_monitor" "API_Gateway_latency" {
timeout_h = 0
include_tags = true
silenced = "${var.latency_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:apigateway", "team:claranet", "created-by:terraform", "${var.latency_extra_tags}"]
}
@ -34,7 +32,7 @@ resource "datadog_monitor" "API_Gateway_latency" {
resource "datadog_monitor" "API_http_5xx_errors_count" {
count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway HTTP 5xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_5xx_requests_message, var.message)}"
query = <<EOQ
@ -58,8 +56,6 @@ resource "datadog_monitor" "API_http_5xx_errors_count" {
timeout_h = 1
include_tags = true
silenced = "${var.http_5xx_requests_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:apigateway", "team:claranet", "created-by:terraform", "${var.http_5xx_requests_extra_tags}"]
}
@ -67,7 +63,7 @@ resource "datadog_monitor" "API_http_5xx_errors_count" {
resource "datadog_monitor" "API_http_4xx_errors_count" {
count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] API Gateway HTTP 4xx errors {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_4xx_requests_message, var.message)}"
query = <<EOQ
@ -91,7 +87,5 @@ resource "datadog_monitor" "API_http_4xx_errors_count" {
timeout_h = 1
include_tags = true
silenced = "${var.http_4xx_requests_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:apigateway", "team:claranet", "created-by:terraform", "${var.http_4xx_requests_extra_tags}"]
}

View File

@ -35,12 +35,10 @@ Creates DataDog monitors with the following checks:
| eviction\_growing\_enabled | Flag to enable Elasticache eviction growing monitor | string | `"true"` | no |
| eviction\_growing\_extra\_tags | Extra tags for Elasticache eviction growing monitor | list | `[]` | no |
| eviction\_growing\_message | Custom message for Elasticache eviction growing monitor | string | `""` | no |
| eviction\_growing\_silenced | Groups to mute for Elasticache eviction growing monitor | map | `{}` | no |
| eviction\_growing\_threshold\_critical | Elasticache eviction growing critical threshold in percentage | string | `"30"` | no |
| eviction\_growing\_threshold\_warning | Elasticache eviction growing warning threshold in percentage | string | `"10"` | no |
| eviction\_growing\_timeframe | Monitor timeframe for Elasticache eviction growing [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| eviction\_message | Custom message for Elasticache eviction monitor | string | `""` | no |
| eviction\_silenced | Groups to mute for Elasticache eviction monitor | map | `{}` | no |
| eviction\_threshold\_critical | Elasticache free memory critical threshold in percentage | string | `"30"` | no |
| eviction\_threshold\_warning | Elasticache free memory warning threshold in percentage | string | `"0"` | no |
| eviction\_timeframe | Monitor timeframe for Elasticache eviction [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
@ -51,14 +49,12 @@ Creates DataDog monitors with the following checks:
| free\_memory\_enabled | Flag to enable Elasticache free memory monitor | string | `"true"` | no |
| free\_memory\_extra\_tags | Extra tags for Elasticache free memory monitor | list | `[]` | no |
| free\_memory\_message | Custom message for Elasticache free memory monitor | string | `""` | no |
| free\_memory\_silenced | Groups to mute for Elasticache free memory monitor | map | `{}` | no |
| free\_memory\_threshold\_critical | Elasticache free memory critical threshold in percentage | string | `"-70"` | no |
| free\_memory\_threshold\_warning | Elasticache free memory warning threshold in percentage | string | `"-50"` | no |
| free\_memory\_timeframe | Monitor timeframe for Elasticache free memory [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| max\_connection\_enabled | Flag to enable Elasticache max connection monitor | string | `"true"` | no |
| max\_connection\_extra\_tags | Extra tags for Elasticache max connection monitor | list | `[]` | no |
| max\_connection\_message | Custom message for Elasticache max connection monitor | string | `""` | no |
| max\_connection\_silenced | Groups to mute for Elasticache max connection monitor | map | `{}` | no |
| max\_connection\_time\_aggregator | Monitor aggregator for Elasticache max connection [available values: min, max or avg] | string | `"max"` | no |
| max\_connection\_timeframe | Monitor timeframe for Elasticache max connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes |
@ -66,14 +62,12 @@ Creates DataDog monitors with the following checks:
| no\_connection\_enabled | Flag to enable Elasticache no connection monitor | string | `"true"` | no |
| no\_connection\_extra\_tags | Extra tags for Elasticache no connection monitor | list | `[]` | no |
| no\_connection\_message | Custom message for Elasticache no connection monitor | string | `""` | no |
| no\_connection\_silenced | Groups to mute for Elasticache no connection monitor | map | `{}` | no |
| no\_connection\_time\_aggregator | Monitor aggregator for Elasticache no connection [available values: min, max or avg] | string | `"min"` | no |
| no\_connection\_timeframe | Monitor timeframe for Elasticache no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| swap\_enabled | Flag to enable Elasticache swap monitor | string | `"true"` | no |
| swap\_extra\_tags | Extra tags for Elasticache swap monitor | list | `[]` | no |
| swap\_message | Custom message for Elasticache swap monitor | string | `""` | no |
| swap\_silenced | Groups to mute for Elasticache swap monitor | map | `{}` | no |
| swap\_threshold\_critical | Elasticache swap critical threshold in bytes | string | `"50000000"` | no |
| swap\_threshold\_warning | Elasticache swap warning threshold in bytes | string | `"0"` | no |
| swap\_time\_aggregator | Monitor aggregator for Elasticache memcached swap [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Elasticache specific
variable "eviction_silenced" {
description = "Groups to mute for Elasticache eviction monitor"
type = "map"
default = {}
}
variable "eviction_enabled" {
description = "Flag to enable Elasticache eviction monitor"
@ -82,12 +77,6 @@ variable "eviction_threshold_critical" {
default = 30
}
variable "max_connection_silenced" {
description = "Groups to mute for Elasticache max connection monitor"
type = "map"
default = {}
}
variable "max_connection_enabled" {
description = "Flag to enable Elasticache max connection monitor"
type = "string"
@ -118,12 +107,6 @@ variable "max_connection_timeframe" {
default = "last_5m"
}
variable "no_connection_silenced" {
description = "Groups to mute for Elasticache no connection monitor"
type = "map"
default = {}
}
variable "no_connection_enabled" {
description = "Flag to enable Elasticache no connection monitor"
type = "string"
@ -154,12 +137,6 @@ variable "no_connection_timeframe" {
default = "last_5m"
}
variable "swap_silenced" {
description = "Groups to mute for Elasticache swap monitor"
type = "map"
default = {}
}
variable "swap_enabled" {
description = "Flag to enable Elasticache swap monitor"
type = "string"
@ -202,12 +179,6 @@ variable "swap_threshold_critical" {
default = 50000000
}
variable "free_memory_silenced" {
description = "Groups to mute for Elasticache free memory monitor"
type = "map"
default = {}
}
variable "free_memory_enabled" {
description = "Flag to enable Elasticache free memory monitor"
type = "string"
@ -250,12 +221,6 @@ variable "free_memory_threshold_critical" {
default = -70
}
variable "eviction_growing_silenced" {
description = "Groups to mute for Elasticache eviction growing monitor"
type = "map"
default = {}
}
variable "eviction_growing_enabled" {
description = "Flag to enable Elasticache eviction growing monitor"
type = "string"

View File

@ -3,7 +3,7 @@ resource "datadog_monitor" "elasticache_eviction" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache eviction {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}"
message = "${coalesce(var.eviction_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
sum(${var.eviction_timeframe}): (
@ -26,8 +26,6 @@ resource "datadog_monitor" "elasticache_eviction" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.eviction_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.eviction_extra_tags}"]
}
@ -36,7 +34,7 @@ resource "datadog_monitor" "elasticache_max_connection" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache max connections reached {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}"
message = "${coalesce(var.max_connection_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.max_connection_time_aggregator}(${var.max_connection_timeframe}): (
@ -54,8 +52,6 @@ resource "datadog_monitor" "elasticache_max_connection" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.max_connection_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.max_connection_extra_tags}"]
}
@ -64,7 +60,7 @@ resource "datadog_monitor" "elasticache_no_connection" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache connections {{#is_alert}}{{{comparator}}} {{threshold}} {{/is_alert}}"
message = "${coalesce(var.no_connection_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.no_connection_time_aggregator}(${var.no_connection_timeframe}): (
@ -82,8 +78,6 @@ resource "datadog_monitor" "elasticache_no_connection" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.no_connection_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.no_connection_extra_tags}"]
}
@ -92,7 +86,7 @@ resource "datadog_monitor" "elasticache_swap" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache swap {{#is_alert}}{{{comparator}}} {{threshold}}MB ({{value}}MB){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}MB ({{value}}MB){{/is_warning}}"
message = "${coalesce(var.swap_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.swap_time_aggregator}(${var.swap_timeframe}): (
@ -115,8 +109,6 @@ resource "datadog_monitor" "elasticache_swap" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.swap_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.swap_extra_tags}"]
}
@ -125,7 +117,7 @@ resource "datadog_monitor" "elasticache_free_memory" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache free memory {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.free_memory_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
pct_change(avg(${var.free_memory_timeframe}),${var.free_memory_condition_timeframe}):
@ -148,8 +140,6 @@ resource "datadog_monitor" "elasticache_free_memory" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.free_memory_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.free_memory_extra_tags}"]
}
@ -158,7 +148,7 @@ resource "datadog_monitor" "elasticache_eviction_growing" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache evictions is growing {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
message = "${coalesce(var.eviction_growing_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
pct_change(avg(${var.eviction_growing_timeframe}),${var.eviction_growing_condition_timeframe}):
@ -181,7 +171,5 @@ resource "datadog_monitor" "elasticache_eviction_growing" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.eviction_growing_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache", "team:claranet", "created-by:terraform", "${var.eviction_growing_extra_tags}"]
}

View File

@ -26,7 +26,6 @@ Creates DataDog monitors with the following checks:
| cpu\_high\_enabled | Flag to enable Elasticache memcached cpu high monitor | string | `"true"` | no |
| cpu\_high\_extra\_tags | Extra tags for Elasticache memcached cpu high monitor | list | `[]` | no |
| cpu\_high\_message | Custom message for Elasticache memcached cpu high monitor | string | `""` | no |
| cpu\_high\_silenced | Groups to mute for Elasticache memcached cpu high monitor | map | `{}` | no |
| cpu\_high\_threshold\_critical | Elasticache memcached cpu high critical threshold in percentage | string | `"90"` | no |
| cpu\_high\_threshold\_warning | Elasticache memcached cpu high warning threshold in percentage | string | `"75"` | no |
| cpu\_high\_time\_aggregator | Monitor aggregator for Elasticache memcached cpu high [available values: min, max or avg] | string | `"min"` | no |
@ -39,7 +38,6 @@ Creates DataDog monitors with the following checks:
| get\_hits\_enabled | Flag to enable Elasticache memcached get hits monitor | string | `"true"` | no |
| get\_hits\_extra\_tags | Extra tags for Elasticache memcached get hits monitor | list | `[]` | no |
| get\_hits\_message | Custom message for Elasticache memcached get hits monitor | string | `""` | no |
| get\_hits\_silenced | Groups to mute for Elasticache memcached get hits monitor | map | `{}` | no |
| get\_hits\_threshold\_critical | Elasticache memcached get hits critical threshold in percentage | string | `"60"` | no |
| get\_hits\_threshold\_warning | Elasticache memcached get hits warning threshold in percentage | string | `"80"` | no |
| get\_hits\_time\_aggregator | Monitor aggregator for Elasticache memcached get hits [available values: min, max or avg] | string | `"max"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Memcached specific
variable "get_hits_silenced" {
description = "Groups to mute for Elasticache memcached get hits monitor"
type = "map"
default = {}
}
variable "get_hits_enabled" {
description = "Flag to enable Elasticache memcached get hits monitor"
@ -88,12 +83,6 @@ variable "get_hits_threshold_critical" {
default = 60
}
variable "cpu_high_silenced" {
description = "Groups to mute for Elasticache memcached cpu high monitor"
type = "map"
default = {}
}
variable "cpu_high_enabled" {
description = "Flag to enable Elasticache memcached cpu high monitor"
type = "string"

View File

@ -3,7 +3,7 @@ resource "datadog_monitor" "memcached_get_hits" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache memcached cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.get_hits_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.get_hits_time_aggregator}(${var.get_hits_timeframe}): (
@ -28,8 +28,6 @@ resource "datadog_monitor" "memcached_get_hits" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.get_hits_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-memcached", "team:claranet", "created-by:terraform", "engine:memcached", "${var.get_hits_extra_tags}"]
}
@ -38,7 +36,7 @@ resource "datadog_monitor" "memcached_cpu_high" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache memcached CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_high_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.cpu_high_time_aggregator}(${var.cpu_high_timeframe}): (
@ -61,7 +59,5 @@ resource "datadog_monitor" "memcached_cpu_high" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.cpu_high_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-memcached", "team:claranet", "created-by:terraform", "engine:memcached", "${var.cpu_high_extra_tags}"]
}

View File

@ -28,7 +28,6 @@ Creates DataDog monitors with the following checks:
| cache\_hits\_enabled | Flag to enable Elasticache redis cache hits monitor | string | `"true"` | no |
| cache\_hits\_extra\_tags | Extra tags for Elasticache redis cache hits monitor | list | `[]` | no |
| cache\_hits\_message | Custom message for Elasticache redis cache hits monitor | string | `""` | no |
| cache\_hits\_silenced | Groups to mute for Elasticache redis cache hits monitor | map | `{}` | no |
| cache\_hits\_threshold\_critical | Elasticache redis cache hits critical threshold in percentage | string | `"60"` | no |
| cache\_hits\_threshold\_warning | Elasticache redis cache hits warning threshold in percentage | string | `"80"` | no |
| cache\_hits\_time\_aggregator | Monitor aggregator for Elasticache redis cache hits [available values: min, max or avg] | string | `"max"` | no |
@ -36,12 +35,10 @@ Creates DataDog monitors with the following checks:
| commands\_enabled | Flag to enable Elasticache redis commands monitor | string | `"true"` | no |
| commands\_extra\_tags | Extra tags for Elasticache redis commands monitor | list | `[]` | no |
| commands\_message | Custom message for Elasticache redis commands monitor | string | `""` | no |
| commands\_silenced | Groups to mute for Elasticache redis commands monitor | map | `{}` | no |
| commands\_timeframe | Monitor timeframe for Elasticache redis commands [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| cpu\_high\_enabled | Flag to enable Elasticache redis cpu high monitor | string | `"true"` | no |
| cpu\_high\_extra\_tags | Extra tags for Elasticache redis cpu high monitor | list | `[]` | no |
| cpu\_high\_message | Custom message for Elasticache redis cpu high monitor | string | `""` | no |
| cpu\_high\_silenced | Groups to mute for Elasticache redis cpu high monitor | map | `{}` | no |
| cpu\_high\_threshold\_critical | Elasticache redis cpu high critical threshold in percentage | string | `"90"` | no |
| cpu\_high\_threshold\_warning | Elasticache redis cpu high warning threshold in percentage | string | `"75"` | no |
| cpu\_high\_time\_aggregator | Monitor aggregator for Elasticache redis cpu high [available values: min, max or avg] | string | `"min"` | no |
@ -57,7 +54,6 @@ Creates DataDog monitors with the following checks:
| replication\_lag\_enabled | Flag to enable Elasticache redis replication lag monitor | string | `"true"` | no |
| replication\_lag\_extra\_tags | Extra tags for Elasticache redis replication lag monitor | list | `[]` | no |
| replication\_lag\_message | Custom message for Elasticache redis replication lag monitor | string | `""` | no |
| replication\_lag\_silenced | Groups to mute for Elasticache redis replication lag monitor | map | `{}` | no |
| replication\_lag\_threshold\_critical | Elasticache redis replication lag critical threshold in seconds | string | `"180"` | no |
| replication\_lag\_threshold\_warning | Elasticache redis replication lag warning threshold in seconds | string | `"90"` | no |
| replication\_lag\_time\_aggregator | Monitor aggregator for Elasticache redis replication lag [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# redis specific
variable "cache_hits_silenced" {
description = "Groups to mute for Elasticache redis cache hits monitor"
type = "map"
default = {}
}
variable "cache_hits_enabled" {
description = "Flag to enable Elasticache redis cache hits monitor"
@ -88,12 +83,6 @@ variable "cache_hits_threshold_critical" {
default = 60
}
variable "cpu_high_silenced" {
description = "Groups to mute for Elasticache redis cpu high monitor"
type = "map"
default = {}
}
variable "cpu_high_enabled" {
description = "Flag to enable Elasticache redis cpu high monitor"
type = "string"
@ -136,12 +125,6 @@ variable "cpu_high_threshold_critical" {
default = 90
}
variable "replication_lag_silenced" {
description = "Groups to mute for Elasticache redis replication lag monitor"
type = "map"
default = {}
}
variable "replication_lag_enabled" {
description = "Flag to enable Elasticache redis replication lag monitor"
type = "string"
@ -184,12 +167,6 @@ variable "replication_lag_threshold_critical" {
default = 180
}
variable "commands_silenced" {
description = "Groups to mute for Elasticache redis commands monitor"
type = "map"
default = {}
}
variable "commands_enabled" {
description = "Flag to enable Elasticache redis commands monitor"
type = "string"

View File

@ -3,7 +3,7 @@ resource "datadog_monitor" "redis_cache_hits" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis cache hit ratio {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cache_hits_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.cache_hits_time_aggregator}(${var.cache_hits_timeframe}): default(
@ -28,8 +28,6 @@ resource "datadog_monitor" "redis_cache_hits" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.cache_hits_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-redis", "team:claranet", "created-by:terraform", "engine:redis", "${var.cache_hits_extra_tags}"]
}
@ -38,7 +36,7 @@ resource "datadog_monitor" "redis_cpu_high" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis CPU {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_high_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.cpu_high_time_aggregator}(${var.cpu_high_timeframe}): (
@ -56,8 +54,6 @@ resource "datadog_monitor" "redis_cpu_high" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.cpu_high_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-redis", "team:claranet", "created-by:terraform", "engine:redis", "${var.cpu_high_extra_tags}"]
}
@ -66,7 +62,7 @@ resource "datadog_monitor" "redis_replication_lag" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis replication lag {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
message = "${coalesce(var.replication_lag_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.replication_lag_time_aggregator}(${var.replication_lag_timeframe}): (
@ -89,8 +85,6 @@ resource "datadog_monitor" "redis_replication_lag" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.replication_lag_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-redis", "team:claranet", "created-by:terraform", "engine:redis", "${var.replication_lag_extra_tags}"]
}
@ -99,7 +93,7 @@ resource "datadog_monitor" "redis_commands" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Elasticache redis is receiving no commands"
message = "${coalesce(var.commands_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
sum(${var.commands_timeframe}): (
@ -118,7 +112,5 @@ resource "datadog_monitor" "redis_commands" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.commands_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticache-redis", "team:claranet", "created-by:terraform", "engine:redis", "${var.commands_extra_tags}"]
}

View File

@ -29,7 +29,6 @@ Creates DataDog monitors with the following checks:
| cpu\_enabled | Flag to enable ES cluster cpu monitor | string | `"true"` | no |
| cpu\_extra\_tags | Extra tags for ES cluster cpu monitor | list | `[]` | no |
| cpu\_message | Custom message for ES cluster cpu monitor | string | `""` | no |
| cpu\_silenced | Groups to mute for ES cluster cpu monitor | map | `{}` | no |
| cpu\_threshold\_critical | CPU usage in percent (critical threshold) | string | `"90"` | no |
| cpu\_threshold\_warning | CPU usage in percent (warning threshold) | string | `"80"` | no |
| cpu\_time\_aggregator | Monitor aggregator for ES cluster cpu [available values: min, max or avg] | string | `"min"` | no |
@ -37,7 +36,6 @@ Creates DataDog monitors with the following checks:
| diskspace\_enabled | Flag to enable ES cluster diskspace monitor | string | `"true"` | no |
| diskspace\_extra\_tags | Extra tags for ES cluster diskspace monitor | list | `[]` | no |
| diskspace\_message | Custom message for ES cluster diskspace monitor | string | `""` | no |
| diskspace\_silenced | Groups to mute for ES cluster diskspace monitor | map | `{}` | no |
| diskspace\_threshold\_critical | Disk free space in percent (critical threshold) | string | `"10"` | no |
| diskspace\_threshold\_warning | Disk free space in percent (warning threshold) | string | `"20"` | no |
| diskspace\_time\_aggregator | Monitor aggregator for ES cluster diskspace [available values: min, max or avg] | string | `"max"` | no |
@ -46,7 +44,6 @@ Creates DataDog monitors with the following checks:
| es\_cluster\_status\_enabled | Flag to enable ES cluster status monitor | string | `"true"` | no |
| es\_cluster\_status\_extra\_tags | Extra tags for ES cluster status monitor | list | `[]` | no |
| es\_cluster\_status\_message | Custom message for ES cluster status monitor | string | `""` | no |
| es\_cluster\_status\_silenced | Groups to mute for ES cluster status monitor | map | `{}` | no |
| es\_cluster\_status\_timeframe | Monitor timeframe for ES cluster status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_30m"` | no |
| es\_cluster\_volume\_size | ElasticSearch Domain volume size (in GB) | string | n/a | yes |
| evaluation\_delay | Delay in seconds for the metric evaluation | string | `"900"` | no |

View File

@ -41,12 +41,6 @@ variable "filter_tags_custom_excluded" {
# AWS ElasticSearch Service specific
variable "es_cluster_status_silenced" {
description = "Groups to mute for ES cluster status monitor"
type = "map"
default = {}
}
variable "es_cluster_status_enabled" {
description = "Flag to enable ES cluster status monitor"
type = "string"
@ -75,12 +69,6 @@ variable "es_cluster_volume_size" {
description = "ElasticSearch Domain volume size (in GB)"
}
variable "diskspace_silenced" {
description = "Groups to mute for ES cluster diskspace monitor"
type = "map"
default = {}
}
variable "diskspace_enabled" {
description = "Flag to enable ES cluster diskspace monitor"
type = "string"
@ -121,12 +109,6 @@ variable "diskspace_threshold_critical" {
default = "10"
}
variable "cpu_silenced" {
description = "Groups to mute for ES cluster cpu monitor"
type = "map"
default = {}
}
variable "cpu_enabled" {
description = "Flag to enable ES cluster cpu monitor"
type = "string"

View File

@ -8,7 +8,7 @@ resource "datadog_monitor" "es_cluster_status" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster status is not green"
message = "${coalesce(var.es_cluster_status_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
max(${var.es_cluster_status_timeframe}): (
@ -32,8 +32,6 @@ resource "datadog_monitor" "es_cluster_status" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.es_cluster_status_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticsearch", "team:claranet", "created-by:terraform", "${var.es_cluster_status_extra_tags}"]
}
@ -43,7 +41,7 @@ resource "datadog_monitor" "es_free_space_low" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster free storage space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.diskspace_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.diskspace_time_aggregator}(${var.diskspace_timeframe}): (
@ -67,8 +65,6 @@ resource "datadog_monitor" "es_free_space_low" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.diskspace_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticsearch", "team:claranet", "created-by:terraform", "${var.diskspace_extra_tags}"]
}
@ -78,7 +74,7 @@ resource "datadog_monitor" "es_cpu_90_15min" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] ElasticSearch cluster CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.cpu_time_aggregator}(${var.cpu_timeframe}): (
@ -101,7 +97,5 @@ resource "datadog_monitor" "es_cpu_90_15min" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.cpu_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elasticsearch", "team:claranet", "created-by:terraform", "${var.cpu_extra_tags}"]
}

View File

@ -31,28 +31,24 @@ Creates DataDog monitors with the following checks:
| elb\_4xx\_enabled | Flag to enable ELB 4xx errors monitor | string | `"true"` | no |
| elb\_4xx\_extra\_tags | Extra tags for ELB 4xx errors monitor | list | `[]` | no |
| elb\_4xx\_message | Custom message for ELB 4xx errors monitor | string | `""` | no |
| elb\_4xx\_silenced | Groups to mute for ELB 4xx errors monitor | map | `{}` | no |
| elb\_4xx\_threshold\_critical | loadbalancer 4xx critical threshold in percentage | string | `"10"` | no |
| elb\_4xx\_threshold\_warning | loadbalancer 4xx warning threshold in percentage | string | `"5"` | no |
| elb\_4xx\_timeframe | Monitor timeframe for ELB 4xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| elb\_5xx\_enabled | Flag to enable ELB 5xx errors monitor | string | `"true"` | no |
| elb\_5xx\_extra\_tags | Extra tags for ELB 5xx errors monitor | list | `[]` | no |
| elb\_5xx\_message | Custom message for ELB 5xx errors monitor | string | `""` | no |
| elb\_5xx\_silenced | Groups to mute for ELB 5xx errors monitor | map | `{}` | no |
| elb\_5xx\_threshold\_critical | loadbalancer 5xx critical threshold in percentage | string | `"10"` | no |
| elb\_5xx\_threshold\_warning | loadbalancer 5xx warning threshold in percentage | string | `"5"` | no |
| elb\_5xx\_timeframe | Monitor timeframe for ELB 5xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| elb\_backend\_4xx\_enabled | Flag to enable ELB backend 4xx errors monitor | string | `"true"` | no |
| elb\_backend\_4xx\_extra\_tags | Extra tags for ELB backend 4xx errors monitor | list | `[]` | no |
| elb\_backend\_4xx\_message | Custom message for ELB backend 4xx errors monitor | string | `""` | no |
| elb\_backend\_4xx\_silenced | Groups to mute for ELB backend 4xx errors monitor | map | `{}` | no |
| elb\_backend\_4xx\_threshold\_critical | loadbalancer backend 4xx critical threshold in percentage | string | `"10"` | no |
| elb\_backend\_4xx\_threshold\_warning | loadbalancer backend 4xx warning threshold in percentage | string | `"5"` | no |
| elb\_backend\_4xx\_timeframe | Monitor timeframe for ELB backend 4xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| elb\_backend\_5xx\_enabled | Flag to enable ELB backend 5xx errors monitor | string | `"true"` | no |
| elb\_backend\_5xx\_extra\_tags | Extra tags for ELB backend 5xx errors monitor | list | `[]` | no |
| elb\_backend\_5xx\_message | Custom message for ELB backend 5xx errors monitor | string | `""` | no |
| elb\_backend\_5xx\_silenced | Groups to mute for ELB backend 5xx errors monitor | map | `{}` | no |
| elb\_backend\_5xx\_threshold\_critical | loadbalancer backend 5xx critical threshold in percentage | string | `"10"` | no |
| elb\_backend\_5xx\_threshold\_warning | loadbalancer backend 5xx warning threshold in percentage | string | `"5"` | no |
| elb\_backend\_5xx\_timeframe | Monitor timeframe for ELB backend 5xx errors [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
@ -60,14 +56,12 @@ Creates DataDog monitors with the following checks:
| elb\_backend\_latency\_enabled | Flag to enable ELB backend latency monitor | string | `"true"` | no |
| elb\_backend\_latency\_extra\_tags | Extra tags for ELB backend latency monitor | list | `[]` | no |
| elb\_backend\_latency\_message | Custom message for ELB backend latency monitor | string | `""` | no |
| elb\_backend\_latency\_silenced | Groups to mute for ELB backend latency monitor | map | `{}` | no |
| elb\_backend\_latency\_time\_aggregator | Monitor aggregator for ELB backend latency [available values: min, max or avg] | string | `"min"` | no |
| elb\_backend\_latency\_timeframe | Monitor timeframe for ELB backend latency [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| elb\_backend\_latency\_warning | latency warning threshold in seconds | string | `"1"` | no |
| elb\_no\_healthy\_instance\_enabled | Flag to enable ELB no healty instance monitor | string | `"true"` | no |
| elb\_no\_healthy\_instance\_extra\_tags | Extra tags for ELB no healty instance monitor | list | `[]` | no |
| elb\_no\_healthy\_instance\_message | Custom message for ELB no healty instance monitor | string | `""` | no |
| elb\_no\_healthy\_instance\_silenced | Groups to mute for ELB no healty instance monitor | map | `{}` | no |
| elb\_no\_healthy\_instance\_time\_aggregator | Monitor aggregator for ELB no healty instance [available values: min or max] | string | `"min"` | no |
| elb\_no\_healthy\_instance\_timeframe | Monitor timeframe for ELB no healty instance [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| environment | Architecture Environment | string | n/a | yes |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
## ELB
variable "elb_no_healthy_instance_silenced" {
description = "Groups to mute for ELB no healty instance monitor"
type = "map"
default = {}
}
variable "elb_no_healthy_instance_enabled" {
description = "Flag to enable ELB no healty instance monitor"
@ -76,12 +71,6 @@ variable "elb_no_healthy_instance_timeframe" {
default = "last_5m"
}
variable "elb_4xx_silenced" {
description = "Groups to mute for ELB 4xx errors monitor"
type = "map"
default = {}
}
variable "elb_4xx_enabled" {
description = "Flag to enable ELB 4xx errors monitor"
type = "string"
@ -116,12 +105,6 @@ variable "elb_4xx_threshold_critical" {
default = 10
}
variable "elb_5xx_silenced" {
description = "Groups to mute for ELB 5xx errors monitor"
type = "map"
default = {}
}
variable "elb_5xx_enabled" {
description = "Flag to enable ELB 5xx errors monitor"
type = "string"
@ -156,12 +139,6 @@ variable "elb_5xx_threshold_critical" {
default = 10
}
variable "elb_backend_4xx_silenced" {
description = "Groups to mute for ELB backend 4xx errors monitor"
type = "map"
default = {}
}
variable "elb_backend_4xx_enabled" {
description = "Flag to enable ELB backend 4xx errors monitor"
type = "string"
@ -196,12 +173,6 @@ variable "elb_backend_4xx_threshold_critical" {
default = 10
}
variable "elb_backend_5xx_silenced" {
description = "Groups to mute for ELB backend 5xx errors monitor"
type = "map"
default = {}
}
variable "elb_backend_5xx_enabled" {
description = "Flag to enable ELB backend 5xx errors monitor"
type = "string"
@ -236,12 +207,6 @@ variable "elb_backend_5xx_threshold_critical" {
default = 10
}
variable "elb_backend_latency_silenced" {
description = "Groups to mute for ELB backend latency monitor"
type = "map"
default = {}
}
variable "elb_backend_latency_enabled" {
description = "Flag to enable ELB backend latency monitor"
type = "string"

View File

@ -11,7 +11,7 @@ resource "datadog_monitor" "ELB_no_healthy_instances" {
) * 100 < 1
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = 1
@ -28,8 +28,6 @@ resource "datadog_monitor" "ELB_no_healthy_instances" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_no_healthy_instance_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_no_healthy_instance_extra_tags}"]
}
@ -45,7 +43,7 @@ resource "datadog_monitor" "ELB_too_much_4xx" {
* 100 > ${var.elb_4xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.elb_4xx_threshold_warning}"
@ -62,8 +60,6 @@ resource "datadog_monitor" "ELB_too_much_4xx" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_4xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_4xx_extra_tags}"]
}
@ -79,7 +75,7 @@ resource "datadog_monitor" "ELB_too_much_5xx" {
* 100 > ${var.elb_5xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.elb_5xx_threshold_warning}"
@ -96,8 +92,6 @@ resource "datadog_monitor" "ELB_too_much_5xx" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_5xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_5xx_extra_tags}"]
}
@ -113,7 +107,7 @@ resource "datadog_monitor" "ELB_too_much_4xx_backend" {
* 100 > ${var.elb_backend_4xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.elb_backend_4xx_threshold_warning}"
@ -130,8 +124,6 @@ resource "datadog_monitor" "ELB_too_much_4xx_backend" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_backend_4xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_backend_4xx_extra_tags}"]
}
@ -147,7 +139,7 @@ resource "datadog_monitor" "ELB_too_much_5xx_backend" {
* 100 > ${var.elb_backend_5xx_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.elb_backend_5xx_threshold_warning}"
@ -164,8 +156,6 @@ resource "datadog_monitor" "ELB_too_much_5xx_backend" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_backend_5xx_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_backend_5xx_extra_tags}"]
}
@ -180,7 +170,7 @@ resource "datadog_monitor" "ELB_backend_latency" {
> ${var.elb_backend_latency_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.elb_backend_latency_warning}"
@ -197,7 +187,5 @@ resource "datadog_monitor" "ELB_backend_latency" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.elb_backend_latency_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:elb", "team:claranet", "created-by:terraform", "${var.elb_backend_latency_extra_tags}"]
}

View File

@ -30,7 +30,6 @@ Creates DataDog monitors with the following checks:
| incoming\_records\_enabled | Flag to enable Kinesis Firehorse incoming records monitor | string | `"true"` | no |
| incoming\_records\_extra\_tags | Extra tags for Kinesis Firehorse incoming records monitor | list | `[]` | no |
| incoming\_records\_message | Custom message for Kinesis Firehorse incoming records monitor | string | `""` | no |
| incoming\_records\_silenced | Groups to mute for Kinesis Firehorse incoming records monitor | map | `{}` | no |
| incoming\_records\_timeframe | Monitor timeframe for incoming records metrics evaluation [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_15m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes |
| new\_host\_delay | Delay in seconds before monitor new resource | string | `"300"` | no |

View File

@ -41,12 +41,6 @@ variable "filter_tags_custom_excluded" {
# Kinesis-Firehose
variable "incoming_records_silenced" {
description = "Groups to mute for Kinesis Firehorse incoming records monitor"
type = "map"
default = {}
}
variable "incoming_records_enabled" {
description = "Flag to enable Kinesis Firehorse incoming records monitor"
type = "string"

View File

@ -4,7 +4,7 @@ resource "datadog_monitor" "firehose_incoming_records" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Kinesis Firehose No incoming records"
message = "${coalesce(var.incoming_records_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
sum(${var.incoming_records_timeframe}): (
@ -26,7 +26,5 @@ resource "datadog_monitor" "firehose_incoming_records" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.incoming_records_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:kinesis-firehose", "team:claranet", "created-by:terraform", "${var.incoming_records_extra_tags}"]
}

View File

@ -25,7 +25,6 @@ Creates DataDog monitors with the following checks:
| aurora\_replicalag\_enabled | Flag to enable RDS Aurora replica lag monitor | string | `"true"` | no |
| aurora\_replicalag\_extra\_tags | Extra tags for RDS Aurora replica lag monitor | list | `[]` | no |
| aurora\_replicalag\_message | Custom message for RDS Aurora replica lag monitor | string | `""` | no |
| aurora\_replicalag\_silenced | Groups to mute for RDS Aurora replica lag monitor | map | `{}` | no |
| aurora\_replicalag\_threshold\_critical | Aurora replica lag in milliseconds (critical threshold) | string | `"200"` | no |
| aurora\_replicalag\_threshold\_warning | Aurora replica lag in milliseconds (warning threshold) | string | `"100"` | no |
| aurora\_replicalag\_timeframe | Monitor timeframe for RDS Aurora replica lag monitor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -41,12 +41,6 @@ variable "filter_tags_custom_excluded" {
# AWS RDS Aurora instance specific
variable "aurora_replicalag_silenced" {
description = "Groups to mute for RDS Aurora replica lag monitor"
type = "map"
default = {}
}
variable "aurora_replicalag_enabled" {
description = "Flag to enable RDS Aurora replica lag monitor"
type = "string"

View File

@ -4,7 +4,7 @@ resource "datadog_monitor" "rds_aurora_mysql_replica_lag" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS Aurora Mysql replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.aurora_replicalag_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
avg(${var.aurora_replicalag_timeframe}): (
@ -26,7 +26,5 @@ resource "datadog_monitor" "rds_aurora_mysql_replica_lag" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.aurora_replicalag_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:rds-aurora-mysql", "team:claranet", "created-by:terraform", "${var.aurora_replicalag_extra_tags}"]
}

View File

@ -25,7 +25,6 @@ Creates DataDog monitors with the following checks:
| aurora\_replicalag\_enabled | Flag to enable RDS Aurora replica lag monitor | string | `"true"` | no |
| aurora\_replicalag\_extra\_tags | Extra tags for RDS Aurora replica lag monitor | list | `[]` | no |
| aurora\_replicalag\_message | Custom message for RDS Aurora replica lag monitor | string | `""` | no |
| aurora\_replicalag\_silenced | Groups to mute for RDS Aurora replica lag monitor | map | `{}` | no |
| aurora\_replicalag\_threshold\_critical | Aurora replica lag in milliseconds (critical threshold) | string | `"200"` | no |
| aurora\_replicalag\_threshold\_warning | Aurora replica lag in milliseconds (warning threshold) | string | `"100"` | no |
| aurora\_replicalag\_timeframe | Monitor timeframe for RDS Aurora replica lag monitor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -41,12 +41,6 @@ variable "filter_tags_custom_excluded" {
# AWS RDS Aurora instance specific
variable "aurora_replicalag_silenced" {
description = "Groups to mute for RDS Aurora replica lag monitor"
type = "map"
default = {}
}
variable "aurora_replicalag_enabled" {
description = "Flag to enable RDS Aurora replica lag monitor"
type = "string"

View File

@ -4,7 +4,7 @@ resource "datadog_monitor" "rds_aurora_postgresql_replica_lag" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS Aurora PostgreSQL replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.aurora_replicalag_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
avg(${var.aurora_replicalag_timeframe}): (
@ -26,7 +26,5 @@ resource "datadog_monitor" "rds_aurora_postgresql_replica_lag" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.aurora_replicalag_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:rds-aurora-postgresql", "team:claranet", "created-by:terraform", "${var.aurora_replicalag_extra_tags}"]
}

View File

@ -27,7 +27,6 @@ Creates DataDog monitors with the following checks:
| cpu\_enabled | Flag to enable RDS CPU usage monitor | string | `"true"` | no |
| cpu\_extra\_tags | Extra tags for RDS CPU usage monitor | list | `[]` | no |
| cpu\_message | Custom message for RDS CPU usage monitor | string | `""` | no |
| cpu\_silenced | Groups to mute for RDS CPU usage monitor | map | `{}` | no |
| cpu\_threshold\_critical | CPU usage in percent (critical threshold) | string | `"90"` | no |
| cpu\_threshold\_warning | CPU usage in percent (warning threshold) | string | `"80"` | no |
| cpu\_time\_aggregator | Monitor aggregator for RDS CPU usage [available values: min, max or avg] | string | `"min"` | no |
@ -35,7 +34,6 @@ Creates DataDog monitors with the following checks:
| diskspace\_enabled | Flag to enable RDS free diskspace monitor | string | `"true"` | no |
| diskspace\_extra\_tags | Extra tags for RDS free diskspace monitor | list | `[]` | no |
| diskspace\_message | Custom message for RDS free diskspace monitor | string | `""` | no |
| diskspace\_silenced | Groups to mute for RDS free diskspace monitor | map | `{}` | no |
| diskspace\_threshold\_critical | Disk free space in percent (critical threshold) | string | `"10"` | no |
| diskspace\_threshold\_warning | Disk free space in percent (warning threshold) | string | `"20"` | no |
| diskspace\_time\_aggregator | Monitor aggregator for RDS free diskspace [available values: min, max or avg] | string | `"min"` | no |
@ -51,7 +49,6 @@ Creates DataDog monitors with the following checks:
| replicalag\_enabled | Flag to enable RDS replica lag monitor | string | `"true"` | no |
| replicalag\_extra\_tags | Extra tags for RDS replica lag monitor | list | `[]` | no |
| replicalag\_message | Custom message for RDS replica lag monitor | string | `""` | no |
| replicalag\_silenced | Groups to mute for RDS replica lag monitor | map | `{}` | no |
| replicalag\_threshold\_critical | replica lag in seconds (critical threshold) | string | `"300"` | no |
| replicalag\_threshold\_warning | replica lag in seconds (warning threshold) | string | `"200"` | no |
| replicalag\_timeframe | Monitor timeframe for RDS replica lag monitor [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -41,12 +41,6 @@ variable "filter_tags_custom_excluded" {
# AWS RDS instance specific
variable "cpu_silenced" {
description = "Groups to mute for RDS CPU usage monitor"
type = "map"
default = {}
}
variable "cpu_enabled" {
description = "Flag to enable RDS CPU usage monitor"
type = "string"
@ -87,12 +81,6 @@ variable "cpu_threshold_critical" {
default = "90"
}
variable "diskspace_silenced" {
description = "Groups to mute for RDS free diskspace monitor"
type = "map"
default = {}
}
variable "diskspace_enabled" {
description = "Flag to enable RDS free diskspace monitor"
type = "string"
@ -133,12 +121,6 @@ variable "diskspace_threshold_critical" {
default = "10"
}
variable "replicalag_silenced" {
description = "Groups to mute for RDS replica lag monitor"
type = "map"
default = {}
}
variable "replicalag_enabled" {
description = "Flag to enable RDS replica lag monitor"
type = "string"

View File

@ -4,7 +4,7 @@ resource "datadog_monitor" "rds_cpu_90_15min" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS instance CPU high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.cpu_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.cpu_time_aggregator}(${var.cpu_timeframe}): (
@ -26,8 +26,6 @@ resource "datadog_monitor" "rds_cpu_90_15min" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.cpu_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:rds", "team:claranet", "created-by:terraform", "${var.cpu_extra_tags}"]
}
@ -37,7 +35,7 @@ resource "datadog_monitor" "rds_free_space_low" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS instance free space {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
message = "${coalesce(var.diskspace_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
${var.diskspace_time_aggregator}(${var.diskspace_timeframe}): (
@ -60,8 +58,6 @@ resource "datadog_monitor" "rds_free_space_low" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.diskspace_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:rds", "team:claranet", "created-by:terraform", "${var.diskspace_extra_tags}"]
}
@ -71,7 +67,7 @@ resource "datadog_monitor" "rds_replica_lag" {
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] RDS replica lag {{#is_alert}}{{{comparator}}} {{threshold}} ms ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ms ({{value}}%){{/is_warning}}"
message = "${coalesce(var.replicalag_message, var.message)}"
type = "metric alert"
type = "query alert"
query = <<EOQ
avg(${var.replicalag_timeframe}): (
@ -93,7 +89,5 @@ resource "datadog_monitor" "rds_replica_lag" {
require_full_window = false
new_host_delay = "${var.new_host_delay}"
silenced = "${var.replicalag_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:rds", "team:claranet", "created-by:terraform", "${var.replicalag_extra_tags}"]
}

View File

@ -31,7 +31,6 @@ Creates DataDog monitors with the following checks:
| vpn\_status\_enabled | Flag to enable VPN status monitor | string | `"true"` | no |
| vpn\_status\_extra\_tags | Extra tags for VPN status monitor | list | `[]` | no |
| vpn\_status\_message | Custom message for VPN status monitor | string | `""` | no |
| vpn\_status\_silenced | Groups to mute for VPN status monitor | map | `{}` | no |
| vpn\_status\_time\_aggregator | Monitor aggregator for VPN status [available values: min, max or avg] | string | `"max"` | no |
| vpn\_status\_timeframe | Monitor timeframe for VPN status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -29,12 +29,6 @@ variable "filter_tags" {
default = "*"
}
variable "vpn_status_silenced" {
description = "Groups to mute for VPN status monitor"
type = "map"
default = {}
}
variable "vpn_status_enabled" {
description = "Flag to enable VPN status monitor"
type = "string"

View File

@ -9,7 +9,7 @@ resource "datadog_monitor" "VPN_status" {
) < 1
EOQ
type = "metric alert"
type = "query alert"
notify_no_data = true
renotify_interval = 0
@ -20,7 +20,5 @@ resource "datadog_monitor" "VPN_status" {
include_tags = true
require_full_window = false
silenced = "${var.vpn_status_silenced}"
tags = ["env:${var.environment}", "type:cloud", "provider:aws", "resource:vpn", "team:claranet", "created-by:terraform", "${var.vpn_status_extra_tags}"]
}

View File

@ -31,7 +31,6 @@ Creates DataDog monitors with the following checks:
| failed\_requests\_enabled | Flag to enable API Management failed requests monitor | string | `"true"` | no |
| failed\_requests\_extra\_tags | Extra tags for API Management failed requests monitor | list | `[]` | no |
| failed\_requests\_message | Custom message for API Management failed requests monitor | string | `""` | no |
| failed\_requests\_silenced | Groups to mute for API Management failed requests monitor | map | `{}` | no |
| failed\_requests\_threshold\_critical | Maximum acceptable percent of failed requests | string | `"90"` | no |
| failed\_requests\_threshold\_warning | Warning regarding acceptable percent of failed requests | string | `"50"` | no |
| failed\_requests\_time\_aggregator | Monitor aggregator for API Management failed requests [available values: min, max or avg] | string | `"min"` | no |
@ -44,7 +43,6 @@ Creates DataDog monitors with the following checks:
| other\_requests\_enabled | Flag to enable API Management other requests monitor | string | `"true"` | no |
| other\_requests\_extra\_tags | Extra tags for API Management other requests monitor | list | `[]` | no |
| other\_requests\_message | Custom message for API Management other requests monitor | string | `""` | no |
| other\_requests\_silenced | Groups to mute for API Management other requests monitor | map | `{}` | no |
| other\_requests\_threshold\_critical | Maximum acceptable percent of other requests | string | `"90"` | no |
| other\_requests\_threshold\_warning | Warning regarding acceptable percent of other requests | string | `"50"` | no |
| other\_requests\_time\_aggregator | Monitor aggregator for API Management other requests [available values: min, max or avg] | string | `"min"` | no |
@ -53,13 +51,11 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable API Management status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for API Management status monitor | list | `[]` | no |
| status\_message | Custom message for API Management status monitor | string | `""` | no |
| status\_silenced | Groups to mute for API Management status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for API Management status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for API Management status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| successful\_requests\_enabled | Flag to enable API Management successful requests monitor | string | `"true"` | no |
| successful\_requests\_extra\_tags | Extra tags for API Management successful requests monitor | list | `[]` | no |
| successful\_requests\_message | Custom message for API Management successful requests monitor | string | `""` | no |
| successful\_requests\_silenced | Groups to mute for API Management successful requests monitor | map | `{}` | no |
| successful\_requests\_threshold\_critical | Minimum acceptable percent of successful requests | string | `"10"` | no |
| successful\_requests\_threshold\_warning | Warning regarding acceptable percent of successful requests | string | `"30"` | no |
| successful\_requests\_time\_aggregator | Monitor aggregator for API Management successful requests [available values: min, max or avg] | string | `"max"` | no |
@ -67,7 +63,6 @@ Creates DataDog monitors with the following checks:
| unauthorized\_requests\_enabled | Flag to enable API Management unauthorized requests monitor | string | `"true"` | no |
| unauthorized\_requests\_extra\_tags | Extra tags for API Management unauthorized requests monitor | list | `[]` | no |
| unauthorized\_requests\_message | Custom message for API Management unauthorized requests monitor | string | `""` | no |
| unauthorized\_requests\_silenced | Groups to mute for API Management unauthorized requests monitor | map | `{}` | no |
| unauthorized\_requests\_threshold\_critical | Maximum acceptable percent of unauthorized requests | string | `"90"` | no |
| unauthorized\_requests\_threshold\_warning | Warning regarding acceptable percent of unauthorized requests | string | `"50"` | no |
| unauthorized\_requests\_time\_aggregator | Monitor aggregator for API Management unauthorized requests [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Azure API Management specific
variable "status_silenced" {
description = "Groups to mute for API Management status monitor"
type = "map"
default = {}
}
variable "status_enabled" {
description = "Flag to enable API Management status monitor"
@ -76,12 +71,6 @@ variable "status_timeframe" {
default = "last_5m"
}
variable "failed_requests_silenced" {
description = "Groups to mute for API Management failed requests monitor"
type = "map"
default = {}
}
variable "failed_requests_enabled" {
description = "Flag to enable API Management failed requests monitor"
type = "string"
@ -122,12 +111,6 @@ variable "failed_requests_threshold_warning" {
default = 50
}
variable "other_requests_silenced" {
description = "Groups to mute for API Management other requests monitor"
type = "map"
default = {}
}
variable "other_requests_enabled" {
description = "Flag to enable API Management other requests monitor"
type = "string"
@ -168,12 +151,6 @@ variable "other_requests_threshold_warning" {
default = 50
}
variable "unauthorized_requests_silenced" {
description = "Groups to mute for API Management unauthorized requests monitor"
type = "map"
default = {}
}
variable "unauthorized_requests_enabled" {
description = "Flag to enable API Management unauthorized requests monitor"
type = "string"
@ -214,12 +191,6 @@ variable "unauthorized_requests_threshold_warning" {
default = 50
}
variable "successful_requests_silenced" {
description = "Groups to mute for API Management successful requests monitor"
type = "map"
default = {}
}
variable "successful_requests_enabled" {
description = "Flag to enable API Management successful requests monitor"
type = "string"

View File

@ -13,8 +13,6 @@ resource "datadog_monitor" "apimgt_status" {
critical = 1
}
silenced = "${var.status_silenced}"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -45,9 +43,7 @@ resource "datadog_monitor" "apimgt_failed_requests" {
warning = "${var.failed_requests_threshold_warning}"
}
silenced = "${var.failed_requests_silenced}"
type = "metric alert"
type = "query alert"
notify_no_data = false
notify_audit = false
timeout_h = 1
@ -78,9 +74,7 @@ resource "datadog_monitor" "apimgt_other_requests" {
warning = "${var.other_requests_threshold_warning}"
}
silenced = "${var.other_requests_silenced}"
type = "metric alert"
type = "query alert"
notify_no_data = false
notify_audit = false
timeout_h = 1
@ -111,9 +105,7 @@ resource "datadog_monitor" "apimgt_unauthorized_requests" {
warning = "${var.unauthorized_requests_threshold_warning}"
}
silenced = "${var.unauthorized_requests_silenced}"
type = "metric alert"
type = "query alert"
notify_no_data = false
notify_audit = false
timeout_h = 1
@ -146,9 +138,7 @@ resource "datadog_monitor" "apimgt_successful_requests" {
warning = "${var.successful_requests_threshold_warning}"
}
silenced = "${var.successful_requests_silenced}"
type = "metric alert"
type = "query alert"
notify_no_data = false
notify_audit = false
timeout_h = 1

View File

@ -35,7 +35,6 @@ Creates DataDog monitors with the following checks:
| http\_4xx\_requests\_enabled | Flag to enable App Services 4xx requests monitor | string | `"true"` | no |
| http\_4xx\_requests\_extra\_tags | Extra tags for App Services 4xx requests monitor | list | `[]` | no |
| http\_4xx\_requests\_message | Custom message for App Services 4xx requests monitor | string | `""` | no |
| http\_4xx\_requests\_silenced | Groups to mute for App Services 4xx requests monitor | map | `{}` | no |
| http\_4xx\_requests\_threshold\_critical | Maximum critical acceptable percent of 4xx errors | string | `"90"` | no |
| http\_4xx\_requests\_threshold\_warning | Warning regarding acceptable percent of 4xx errors | string | `"50"` | no |
| http\_4xx\_requests\_time\_aggregator | Monitor aggregator for App Services 4xx requests [available values: min, max or avg] | string | `"min"` | no |
@ -43,7 +42,6 @@ Creates DataDog monitors with the following checks:
| http\_5xx\_requests\_enabled | Flag to enable App Services 5xx requests monitor | string | `"true"` | no |
| http\_5xx\_requests\_extra\_tags | Extra tags for App Services 5xx requests monitor | list | `[]` | no |
| http\_5xx\_requests\_message | Custom message for App Services 5xx requests monitor | string | `""` | no |
| http\_5xx\_requests\_silenced | Groups to mute for App Services 5xx requests monitor | map | `{}` | no |
| http\_5xx\_requests\_threshold\_critical | Maximum critical acceptable percent of 5xx errors | string | `"90"` | no |
| http\_5xx\_requests\_threshold\_warning | Warning regarding acceptable percent of 5xx errors | string | `"50"` | no |
| http\_5xx\_requests\_time\_aggregator | Monitor aggregator for App Services 5xx requests [available values: min, max or avg] | string | `"min"` | no |
@ -51,7 +49,6 @@ Creates DataDog monitors with the following checks:
| http\_successful\_requests\_enabled | Flag to enable App Services successful requests monitor | string | `"true"` | no |
| http\_successful\_requests\_extra\_tags | Extra tags for App Services successful requests monitor | list | `[]` | no |
| http\_successful\_requests\_message | Custom message for App Services successful requests monitor | string | `""` | no |
| http\_successful\_requests\_silenced | Groups to mute for App Services successful requests monitor | map | `{}` | no |
| http\_successful\_requests\_threshold\_critical | Minimum critical acceptable percent of 2xx & 3xx requests | string | `"10"` | no |
| http\_successful\_requests\_threshold\_warning | Warning regarding acceptable percent of 2xx & 3xx requests | string | `"30"` | no |
| http\_successful\_requests\_time\_aggregator | Monitor aggregator for App Services successful requests [available values: min, max or avg] | string | `"max"` | no |
@ -59,7 +56,6 @@ Creates DataDog monitors with the following checks:
| memory\_usage\_enabled | Flag to enable App Services memory usage monitor | string | `"true"` | no |
| memory\_usage\_extra\_tags | Extra tags for App Services memory usage monitor | list | `[]` | no |
| memory\_usage\_message | Custom message for App Services memory usage monitor | string | `""` | no |
| memory\_usage\_silenced | Groups to mute for App Services memory usage monitor | map | `{}` | no |
| memory\_usage\_threshold\_critical | Alerting threshold in Mib | string | `"1073741824"` | no |
| memory\_usage\_threshold\_warning | Warning threshold in MiB | string | `"536870912"` | no |
| memory\_usage\_time\_aggregator | Monitor aggregator for App Services memory usage [available values: min, max or avg] | string | `"min"` | no |
@ -70,7 +66,6 @@ Creates DataDog monitors with the following checks:
| response\_time\_enabled | Flag to enable App Services response time monitor | string | `"true"` | no |
| response\_time\_extra\_tags | Extra tags for App Services response time monitor | list | `[]` | no |
| response\_time\_message | Custom message for App Services response time monitor | string | `""` | no |
| response\_time\_silenced | Groups to mute for App Services response time monitor | map | `{}` | no |
| response\_time\_threshold\_critical | Alerting threshold for response time in seconds | string | `"10"` | no |
| response\_time\_threshold\_warning | Warning threshold for response time in seconds | string | `"5"` | no |
| response\_time\_time\_aggregator | Monitor aggregator for App Services response time [available values: min, max or avg] | string | `"min"` | no |
@ -78,7 +73,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable App Services status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for App Services status monitor | list | `[]` | no |
| status\_message | Custom message for App Services status monitor | string | `""` | no |
| status\_silenced | Groups to mute for App Services status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for App Services status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for App Services status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -38,11 +38,6 @@ variable "prefix_slug" {
}
# Azure App Services specific variables
variable "response_time_silenced" {
description = "Groups to mute for App Services response time monitor"
type = "map"
default = {}
}
variable "response_time_enabled" {
description = "Flag to enable App Services response time monitor"
@ -84,12 +79,6 @@ variable "response_time_threshold_warning" {
description = "Warning threshold for response time in seconds"
}
variable "memory_usage_silenced" {
description = "Groups to mute for App Services memory usage monitor"
type = "map"
default = {}
}
variable "memory_usage_enabled" {
description = "Flag to enable App Services memory usage monitor"
type = "string"
@ -130,12 +119,6 @@ variable "memory_usage_threshold_warning" {
description = "Warning threshold in MiB"
}
variable "http_4xx_requests_silenced" {
description = "Groups to mute for App Services 4xx requests monitor"
type = "map"
default = {}
}
variable "http_4xx_requests_enabled" {
description = "Flag to enable App Services 4xx requests monitor"
type = "string"
@ -176,12 +159,6 @@ variable "http_4xx_requests_threshold_warning" {
description = "Warning regarding acceptable percent of 4xx errors"
}
variable "http_5xx_requests_silenced" {
description = "Groups to mute for App Services 5xx requests monitor"
type = "map"
default = {}
}
variable "http_5xx_requests_enabled" {
description = "Flag to enable App Services 5xx requests monitor"
type = "string"
@ -222,12 +199,6 @@ variable "http_5xx_requests_threshold_warning" {
description = "Warning regarding acceptable percent of 5xx errors"
}
variable "http_successful_requests_silenced" {
description = "Groups to mute for App Services successful requests monitor"
type = "map"
default = {}
}
variable "http_successful_requests_enabled" {
description = "Flag to enable App Services successful requests monitor"
type = "string"
@ -280,12 +251,6 @@ variable "status_message" {
default = ""
}
variable "status_silenced" {
description = "Groups to mute for App Services status monitor"
type = "map"
default = {}
}
variable "status_extra_tags" {
description = "Extra tags for App Services status monitor"
type = "list"

View File

@ -2,7 +2,7 @@
resource "datadog_monitor" "appservices_response_time" {
count = "${var.response_time_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services response time too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.response_time_message, var.message)}"
query = <<EOQ
@ -19,8 +19,6 @@ resource "datadog_monitor" "appservices_response_time" {
critical = "${var.response_time_threshold_critical}"
}
silenced = "${var.response_time_silenced}"
notify_no_data = false # Will NOT notify when no data is received
renotify_interval = 0
require_full_window = false
@ -34,7 +32,7 @@ resource "datadog_monitor" "appservices_response_time" {
resource "datadog_monitor" "appservices_memory_usage_count" {
count = "${var.memory_usage_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services memory usage {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.memory_usage_message, var.message)}"
query = <<EOQ
@ -51,8 +49,6 @@ resource "datadog_monitor" "appservices_memory_usage_count" {
critical = "${var.memory_usage_threshold_critical}"
}
silenced = "${var.memory_usage_silenced}"
notify_no_data = false # Will NOT notify when no data is received
renotify_interval = 0
require_full_window = false
@ -66,7 +62,7 @@ resource "datadog_monitor" "appservices_memory_usage_count" {
resource "datadog_monitor" "appservices_http_5xx_errors_count" {
count = "${var.http_5xx_requests_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_5xx_requests_message, var.message)}"
query = <<EOQ
@ -84,8 +80,6 @@ resource "datadog_monitor" "appservices_http_5xx_errors_count" {
critical = "${var.http_5xx_requests_threshold_critical}"
}
silenced = "${var.http_5xx_requests_silenced}"
notify_no_data = false # Will NOT notify when no data is received
renotify_interval = 0
require_full_window = false
@ -99,7 +93,7 @@ resource "datadog_monitor" "appservices_http_5xx_errors_count" {
resource "datadog_monitor" "appservices_http_4xx_errors_count" {
count = "${var.http_4xx_requests_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP 4xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_4xx_requests_message, var.message)}"
query = <<EOQ
@ -117,8 +111,6 @@ resource "datadog_monitor" "appservices_http_4xx_errors_count" {
critical = "${var.http_4xx_requests_threshold_critical}"
}
silenced = "${var.http_4xx_requests_silenced}"
notify_no_data = false # Will NOT notify when no data is received
renotify_interval = 0
require_full_window = false
@ -132,7 +124,7 @@ resource "datadog_monitor" "appservices_http_4xx_errors_count" {
resource "datadog_monitor" "appservices_http_success_status_rate" {
count = "${var.http_successful_requests_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] App Services HTTP successful responses too low {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_successful_requests_message, var.message)}"
query = <<EOQ
@ -152,8 +144,6 @@ resource "datadog_monitor" "appservices_http_success_status_rate" {
critical = "${var.http_successful_requests_threshold_critical}"
}
silenced = "${var.http_successful_requests_silenced}"
notify_no_data = false # Will notify when no data is received
renotify_interval = 0
require_full_window = false
@ -181,7 +171,6 @@ resource "datadog_monitor" "appservices_status" {
critical = 1
}
silenced = "${var.status_silenced}"
notify_no_data = true # Will notify when no data is received
renotify_interval = 0
require_full_window = false

View File

@ -31,7 +31,6 @@ Creates DataDog monitors with the following checks:
| latency\_enabled | Flag to enable Azure Search latency monitor | string | `"true"` | no |
| latency\_extra\_tags | Extra tags for Azure Search latency monitor | list | `[]` | no |
| latency\_message | Custom message for Azure Search latency monitor | string | `""` | no |
| latency\_silenced | Groups to mute for Azure Search latency monitor | map | `{}` | no |
| latency\_threshold\_critical | Alerting threshold for Azure Search latency in seconds | string | `"4"` | no |
| latency\_threshold\_warning | Warning threshold for Azure Search latency in seconds | string | `"2"` | no |
| latency\_time\_aggregator | Monitor aggregator for Azure Search latency [available values: min, max or avg] | string | `"min"` | no |
@ -42,7 +41,6 @@ Creates DataDog monitors with the following checks:
| throttled\_queries\_rate\_enabled | Flag to enable Azure Search throttled queries rate monitor | string | `"true"` | no |
| throttled\_queries\_rate\_extra\_tags | Extra tags for Azure Search throttled queries rate monitor | list | `[]` | no |
| throttled\_queries\_rate\_message | Custom message for Azure Search throttled queries rate monitor | string | `""` | no |
| throttled\_queries\_rate\_silenced | Groups to mute for Azure Search throttled queries rate monitor | map | `{}` | no |
| throttled\_queries\_rate\_threshold\_critical | Alerting threshold for Azure Search throttled queries rate | string | `"50"` | no |
| throttled\_queries\_rate\_threshold\_warning | Warning threshold for Azure Search throttled queries rate | string | `"25"` | no |
| throttled\_queries\_rate\_time\_aggregator | Monitor aggregator for Azure Search throttled queries rate [available values: min, max or avg] | string | `"min"` | no |

View File

@ -38,11 +38,6 @@ variable "prefix_slug" {
}
# Azure Search specific variables
variable "latency_silenced" {
description = "Groups to mute for Azure Search latency monitor"
type = "map"
default = {}
}
variable "latency_enabled" {
description = "Flag to enable Azure Search latency monitor"
@ -84,12 +79,6 @@ variable "latency_threshold_warning" {
description = "Warning threshold for Azure Search latency in seconds"
}
variable "throttled_queries_rate_silenced" {
description = "Groups to mute for Azure Search throttled queries rate monitor"
type = "map"
default = {}
}
variable "throttled_queries_rate_enabled" {
description = "Flag to enable Azure Search throttled queries rate monitor"
type = "string"

View File

@ -2,7 +2,7 @@
resource "datadog_monitor" "azure_search_latency" {
count = "${var.latency_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Azure Search latency too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.latency_message, var.message)}"
query = <<EOQ
@ -19,8 +19,6 @@ resource "datadog_monitor" "azure_search_latency" {
critical = "${var.latency_threshold_critical}"
}
silenced = "${var.latency_silenced}"
notify_no_data = true # Will not notify when no data is received
renotify_interval = 0
require_full_window = false
@ -34,7 +32,7 @@ resource "datadog_monitor" "azure_search_latency" {
resource "datadog_monitor" "azure_search_throttled_queries_rate" {
count = "${var.throttled_queries_rate_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Azure Search throttled queries rate is too high {{#is_alert}}{{{comparator}}} {{threshold}}s ({{value}}s){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}s ({{value}}s){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.throttled_queries_rate_message, var.message)}"
query = <<EOQ
@ -51,8 +49,6 @@ resource "datadog_monitor" "azure_search_throttled_queries_rate" {
critical = "${var.throttled_queries_rate_threshold_critical}"
}
silenced = "${var.throttled_queries_rate_silenced}"
notify_no_data = false # Will notify when no data is received
renotify_interval = 0
require_full_window = false

View File

@ -32,7 +32,6 @@ Creates DataDog monitors with the following checks:
| cosmos\_db\_4xx\_request\_timeframe | Monitor timeframe for Cosmos DB 4xx requests [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| cosmos\_db\_4xx\_requests\_enabled | Flag to enable Cosmos DB 4xx requests monitor | string | `"true"` | no |
| cosmos\_db\_4xx\_requests\_message | Custom message for Cosmos DB 4xx requests monitor | string | `""` | no |
| cosmos\_db\_4xx\_requests\_silenced | Groups to mute for Cosmos DB 4xx requests monitor | map | `{}` | no |
| cosmos\_db\_5xx\_request\_rate\_extra\_tags | Extra tags for Cosmos DB 5xx requests monitor | list | `[]` | no |
| cosmos\_db\_5xx\_request\_rate\_threshold\_critical | Critical threshold for Cosmos DB 5xx requests monitor | string | `"80"` | no |
| cosmos\_db\_5xx\_request\_rate\_threshold\_warning | Warning threshold for Cosmos DB 5xx requests monitor | string | `"50"` | no |
@ -40,13 +39,11 @@ Creates DataDog monitors with the following checks:
| cosmos\_db\_5xx\_request\_timeframe | Monitor timeframe for Cosmos DB 5xx requests [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| cosmos\_db\_5xx\_requests\_enabled | Flag to enable Cosmos DB 5xx requests monitor | string | `"true"` | no |
| cosmos\_db\_5xx\_requests\_message | Custom message for Cosmos DB 5xx requests monitor | string | `""` | no |
| cosmos\_db\_5xx\_requests\_silenced | Groups to mute for Cosmos DB 5xx requests monitor | map | `{}` | no |
| cosmos\_db\_scaling\_enabled | Flag to enable Cosmos DB scaling monitor | string | `"true"` | no |
| cosmos\_db\_scaling\_error\_rate\_threshold\_critical | Critical threshold for Cosmos DB scaling monitor | string | `"10"` | no |
| cosmos\_db\_scaling\_error\_rate\_threshold\_warning | Warning threshold for Cosmos DB scaling monitor | string | `"5"` | no |
| cosmos\_db\_scaling\_extra\_tags | Extra tags for Cosmos DB scaling monitor | list | `[]` | no |
| cosmos\_db\_scaling\_message | Custom message for Cosmos DB scaling monitor | string | `""` | no |
| cosmos\_db\_scaling\_silenced | Groups to mute for Cosmos DB scaling monitor | map | `{}` | no |
| cosmos\_db\_scaling\_time\_aggregator | Monitor aggregator for Cosmos DB scaling [available values: min, max or avg] | string | `"min"` | no |
| cosmos\_db\_scaling\_timeframe | Monitor timeframe for Cosmos DB scaling [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| environment | Architecture environment | string | n/a | yes |
@ -60,7 +57,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Cosmos DB status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Cosmos DB status monitor | list | `[]` | no |
| status\_message | Custom message for Cosmos DB status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Cosmos DB status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Cosmos DB status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Cosmos DB status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -44,12 +44,6 @@ variable "status_enabled" {
default = "true"
}
variable "status_silenced" {
description = "Groups to mute for Cosmos DB status monitor"
type = "map"
default = {}
}
variable "status_extra_tags" {
description = "Extra tags for Cosmos DB status monitor"
type = "list"
@ -86,12 +80,6 @@ variable "cosmos_db_4xx_requests_enabled" {
default = "true"
}
variable "cosmos_db_4xx_requests_silenced" {
description = "Groups to mute for Cosmos DB 4xx requests monitor"
type = "map"
default = {}
}
variable "cosmos_db_4xx_request_rate_threshold_critical" {
description = "Critical threshold for Cosmos DB 4xx requests monitor"
default = 80
@ -132,12 +120,6 @@ variable "cosmos_db_5xx_requests_enabled" {
default = "true"
}
variable "cosmos_db_5xx_requests_silenced" {
description = "Groups to mute for Cosmos DB 5xx requests monitor"
type = "map"
default = {}
}
variable "cosmos_db_5xx_request_rate_threshold_critical" {
description = "Critical threshold for Cosmos DB 5xx requests monitor"
default = 80
@ -178,12 +160,6 @@ variable "cosmos_db_scaling_enabled" {
default = "true"
}
variable "cosmos_db_scaling_silenced" {
description = "Groups to mute for Cosmos DB scaling monitor"
type = "map"
default = {}
}
variable "cosmos_db_scaling_error_rate_threshold_critical" {
description = "Critical threshold for Cosmos DB scaling monitor"
default = 10

View File

@ -16,8 +16,6 @@ resource "datadog_monitor" "cosmos_db_status" {
critical = 1
}
silenced = "${var.status_silenced}"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -54,15 +52,13 @@ resource "datadog_monitor" "cosmos_db_4xx_requests" {
, 0) * 100 > ${var.cosmos_db_4xx_request_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.cosmos_db_4xx_request_rate_threshold_critical}"
warning = "${var.cosmos_db_4xx_request_rate_threshold_warning}"
}
silenced = "${var.cosmos_db_4xx_requests_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -90,15 +86,13 @@ resource "datadog_monitor" "cosmos_db_5xx_requests" {
, 0) * 100 > ${var.cosmos_db_5xx_request_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.cosmos_db_5xx_request_rate_threshold_critical}"
warning = "${var.cosmos_db_5xx_request_rate_threshold_warning}"
}
silenced = "${var.cosmos_db_5xx_requests_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -126,15 +120,13 @@ resource "datadog_monitor" "cosmos_db_scaling" {
, 0) * 100 > ${var.cosmos_db_scaling_error_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.cosmos_db_scaling_error_rate_threshold_critical}"
warning = "${var.cosmos_db_scaling_error_rate_threshold_warning}"
}
silenced = "${var.cosmos_db_scaling_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -33,7 +33,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Datalake Store status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Datalake Store status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | list | `[]` | no |
| status\_message | Custom message for Datalake Store status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Datalake Store status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Datalake Store status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Datalake Store status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -44,12 +44,6 @@ variable "status_enabled" {
default = "true"
}
variable "status_silenced" {
description = "Groups to mute for Datalake Store status monitor"
type = "map"
default = {}
}
variable "status_message" {
description = "Custom message for Datalake Store status monitor"
type = "string"

View File

@ -10,9 +10,7 @@ resource "datadog_monitor" "datalakestore_status" {
) < 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"

View File

@ -29,7 +29,6 @@ Creates DataDog monitors with the following checks:
| failed\_messages\_rate\_enabled | Flag to enable Event Grid failed messages monitor | string | `"true"` | no |
| failed\_messages\_rate\_extra\_tags | Extra tags for Event Grid failed messages monitor | list | `[]` | no |
| failed\_messages\_rate\_message | Custom message for Event Grid failed messages monitor | string | `""` | no |
| failed\_messages\_rate\_silenced | Groups to mute for Event Grid failed messages monitor | map | `{}` | no |
| failed\_messages\_rate\_thresold\_critical | Failed messages ratio (percentage) to trigger the critical alert | string | `"90"` | no |
| failed\_messages\_rate\_thresold\_warning | Failed messages ratio (percentage) to trigger a warning alert | string | `"50"` | no |
| failed\_messages\_rate\_time\_aggregator | Monitor aggregator for Event Grid failed messages [available values: min, max or avg] | string | `"min"` | no |
@ -42,14 +41,12 @@ Creates DataDog monitors with the following checks:
| no\_successful\_message\_rate\_enabled | Flag to enable Event Grid no successful message monitor | string | `"true"` | no |
| no\_successful\_message\_rate\_extra\_tags | Extra tags for Event Grid no successful message monitor | list | `[]` | no |
| no\_successful\_message\_rate\_message | Custom message for Event Grid no successful message monitor | string | `""` | no |
| no\_successful\_message\_rate\_silenced | Groups to mute for Event Grid no successful message monitor | map | `{}` | no |
| no\_successful\_message\_rate\_time\_aggregator | Monitor aggregator for Event Grid no successful message [available values: min, max or avg] | string | `"min"` | no |
| no\_successful\_message\_rate\_timeframe | Monitor timeframe for Event Grid no successful message [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| unmatched\_events\_rate\_enabled | Flag to enable Event Grid unmatched events monitor | string | `"true"` | no |
| unmatched\_events\_rate\_extra\_tags | Extra tags for Event Grid unmatched events monitor | list | `[]` | no |
| unmatched\_events\_rate\_message | Custom message for Event Grid unmatched events monitor | string | `""` | no |
| unmatched\_events\_rate\_silenced | Groups to mute for Event Grid unmatched events monitor | map | `{}` | no |
| unmatched\_events\_rate\_thresold\_critical | Unmatched events ratio (percentage) to trigger the critical alert | string | `"90"` | no |
| unmatched\_events\_rate\_thresold\_warning | Unmatched events ratio (percentage) to trigger a warning alert | string | `"50"` | no |
| unmatched\_events\_rate\_time\_aggregator | Monitor aggregator for Event Grid unmatched events [available values: min, max or avg] | string | `"min"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Azure Event Grid specific variables
variable "no_successful_message_rate_silenced" {
description = "Groups to mute for Event Grid no successful message monitor"
type = "map"
default = {}
}
variable "no_successful_message_rate_enabled" {
description = "Flag to enable Event Grid no successful message monitor"
@ -76,12 +71,6 @@ variable "no_successful_message_rate_timeframe" {
default = "last_5m"
}
variable "failed_messages_rate_silenced" {
description = "Groups to mute for Event Grid failed messages monitor"
type = "map"
default = {}
}
variable "failed_messages_rate_enabled" {
description = "Flag to enable Event Grid failed messages monitor"
type = "string"
@ -122,12 +111,6 @@ variable "failed_messages_rate_thresold_warning" {
default = 50
}
variable "unmatched_events_rate_silenced" {
description = "Groups to mute for Event Grid unmatched events monitor"
type = "map"
default = {}
}
variable "unmatched_events_rate_enabled" {
description = "Flag to enable Event Grid unmatched events monitor"
type = "string"

View File

@ -11,8 +11,6 @@ resource "datadog_monitor" "eventgrid_no_successful_message" {
type = "metric alert"
silenced = "${var.no_successful_message_rate_silenced}"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -41,15 +39,13 @@ resource "datadog_monitor" "eventgrid_failed_messages" {
) > ${var.failed_messages_rate_thresold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.failed_messages_rate_thresold_critical}"
warning = "${var.failed_messages_rate_thresold_warning}"
}
silenced = "${var.failed_messages_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -78,15 +74,13 @@ resource "datadog_monitor" "eventgrid_unmatched_events" {
) > ${var.unmatched_events_rate_thresold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.unmatched_events_rate_thresold_critical}"
warning = "${var.unmatched_events_rate_thresold_warning}"
}
silenced = "${var.unmatched_events_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -28,7 +28,6 @@ Creates DataDog monitors with the following checks:
| errors\_rate\_enabled | Flag to enable Event Hub errors monitor | string | `"true"` | no |
| errors\_rate\_extra\_tags | Extra tags for Event Hub errors monitor | list | `[]` | no |
| errors\_rate\_message | Custom message for Event Hub errors monitor | string | `""` | no |
| errors\_rate\_silenced | Groups to mute for Event Hub errors monitor | map | `{}` | no |
| errors\_rate\_thresold\_critical | Errors ratio (percentage) to trigger the critical alert | string | `"90"` | no |
| errors\_rate\_thresold\_warning | Errors ratio (percentage) to trigger a warning alert | string | `"50"` | no |
| errors\_rate\_time\_aggregator | Monitor aggregator for Event Hub errors [available values: min, max or avg] | string | `"min"` | no |
@ -37,7 +36,6 @@ Creates DataDog monitors with the following checks:
| failed\_requests\_rate\_enabled | Flag to enable Event Hub failed requests monitor | string | `"true"` | no |
| failed\_requests\_rate\_extra\_tags | Extra tags for Event Hub failed requests monitor | list | `[]` | no |
| failed\_requests\_rate\_message | Custom message for Event Hub failed requests monitor | string | `""` | no |
| failed\_requests\_rate\_silenced | Groups to mute for Event Hub failed requests monitor | map | `{}` | no |
| failed\_requests\_rate\_thresold\_critical | Failed requests ratio (percentage) to trigger the critical alert | string | `"90"` | no |
| failed\_requests\_rate\_thresold\_warning | Failed requests ratio (percentage) to trigger a warning alert | string | `"50"` | no |
| failed\_requests\_rate\_time\_aggregator | Monitor aggregator for Event Hub failed requests [available values: min, max or avg] | string | `"min"` | no |
@ -51,7 +49,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Event Hub status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Event Hub status monitor | list | `[]` | no |
| status\_message | Custom message for Event Hub status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Event Hub status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Event Hub status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Event Hub status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Azure Event Hub specific variables
variable "status_silenced" {
description = "Groups to mute for Event Hub status monitor"
type = "map"
default = {}
}
variable "status_enabled" {
description = "Flag to enable Event Hub status monitor"
@ -76,12 +71,6 @@ variable "status_timeframe" {
default = "last_5m"
}
variable "failed_requests_rate_silenced" {
description = "Groups to mute for Event Hub failed requests monitor"
type = "map"
default = {}
}
variable "failed_requests_rate_enabled" {
description = "Flag to enable Event Hub failed requests monitor"
type = "string"
@ -122,12 +111,6 @@ variable "failed_requests_rate_thresold_warning" {
default = 50
}
variable "errors_rate_silenced" {
description = "Groups to mute for Event Hub errors monitor"
type = "map"
default = {}
}
variable "errors_rate_enabled" {
description = "Flag to enable Event Hub errors monitor"
type = "string"

View File

@ -9,9 +9,7 @@ resource "datadog_monitor" "eventhub_status" {
) != 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -38,15 +36,13 @@ resource "datadog_monitor" "eventhub_failed_requests" {
) * 100 > ${var.failed_requests_rate_thresold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.failed_requests_rate_thresold_critical}"
warning = "${var.failed_requests_rate_thresold_warning}"
}
silenced = "${var.failed_requests_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -74,15 +70,13 @@ resource "datadog_monitor" "eventhub_errors" {
) * 100 > ${var.errors_rate_thresold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.errors_rate_thresold_critical}"
warning = "${var.errors_rate_thresold_warning}"
}
silenced = "${var.errors_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -32,7 +32,6 @@ Creates DataDog monitors with the following checks:
| high\_connections\_count\_enabled | Flag to enable Functions high connections count monitor | string | `"true"` | no |
| high\_connections\_count\_extra\_tags | Extra tags for Functions high connections count monitor | list | `[]` | no |
| high\_connections\_count\_message | Custom message for Functions high connections count monitor | string | `""` | no |
| high\_connections\_count\_silenced | Groups to mute for Functions high connections count monitor | map | `{}` | no |
| high\_connections\_count\_threshold\_critical | Alerting threshold for Functions high connections count | string | `"590"` | no |
| high\_connections\_count\_threshold\_warning | Warning threshold for Functions high connections count | string | `"550"` | no |
| high\_connections\_count\_time\_aggregator | Monitor aggregator for Functions high connections count [available values: min, max or avg] | string | `"min"` | no |
@ -40,7 +39,6 @@ Creates DataDog monitors with the following checks:
| high\_threads\_count\_enabled | Flag to enable Functions high threads count monitor | string | `"true"` | no |
| high\_threads\_count\_extra\_tags | Extra tags for Functions high threads count monitor | list | `[]` | no |
| high\_threads\_count\_message | Custom message for Functions high threads count monitor | string | `""` | no |
| high\_threads\_count\_silenced | Groups to mute for Functions high threads count monitor | map | `{}` | no |
| high\_threads\_count\_threshold\_critical | Alerting threshold for Functions high threads count | string | `"510"` | no |
| high\_threads\_count\_threshold\_warning | Warning threshold for Functions high threads count | string | `"490"` | no |
| high\_threads\_count\_time\_aggregator | Monitor aggregator for Functions high threads count [available values: min, max or avg] | string | `"min"` | no |
@ -48,7 +46,6 @@ Creates DataDog monitors with the following checks:
| http\_5xx\_errors\_rate\_enabled | Flag to enable Functions Http 5xx errors rate monitor | string | `"true"` | no |
| http\_5xx\_errors\_rate\_extra\_tags | Extra tags for Functions Http 5xx errors rate monitor | list | `[]` | no |
| http\_5xx\_errors\_rate\_message | Custom message for Functions Http 5xx errors rate monitor | string | `""` | no |
| http\_5xx\_errors\_rate\_silenced | Groups to mute for Functions Http 5xx errors rate monitor | map | `{}` | no |
| http\_5xx\_errors\_rate\_threshold\_critical | Alerting threshold for Functions Http 5xx errors rate | string | `"20"` | no |
| http\_5xx\_errors\_rate\_threshold\_warning | Warning threshold for Functions Http 5xx errors rate | string | `"10"` | no |
| http\_5xx\_errors\_rate\_time\_aggregator | Monitor aggregator for Functions Http 5xx errors rate [available values: min, max or avg] | string | `"min"` | no |

View File

@ -38,11 +38,6 @@ variable "prefix_slug" {
}
# Azure Function App specific variables
variable "http_5xx_errors_rate_silenced" {
description = "Groups to mute for Functions Http 5xx errors rate monitor"
type = "map"
default = {}
}
variable "http_5xx_errors_rate_enabled" {
description = "Flag to enable Functions Http 5xx errors rate monitor"
@ -84,12 +79,6 @@ variable "http_5xx_errors_rate_threshold_warning" {
description = "Warning threshold for Functions Http 5xx errors rate"
}
variable "high_connections_count_silenced" {
description = "Groups to mute for Functions high connections count monitor"
type = "map"
default = {}
}
variable "high_connections_count_enabled" {
description = "Flag to enable Functions high connections count monitor"
type = "string"
@ -130,12 +119,6 @@ variable "high_connections_count_threshold_warning" {
description = "Warning threshold for Functions high connections count"
}
variable "high_threads_count_silenced" {
description = "Groups to mute for Functions high threads count monitor"
type = "map"
default = {}
}
variable "high_threads_count_enabled" {
description = "Flag to enable Functions high threads count monitor"
type = "string"

View File

@ -1,7 +1,7 @@
resource "datadog_monitor" "function_http_5xx_errors_rate" {
count = "${var.http_5xx_errors_rate_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App HTTP 5xx errors too high {{#is_alert}}{{{comparator}}} {{threshold}}% ({{value}}%){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}}% ({{value}}%){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.http_5xx_errors_rate_message, var.message)}"
query = <<EOQ
@ -19,8 +19,6 @@ resource "datadog_monitor" "function_http_5xx_errors_rate" {
critical = "${var.http_5xx_errors_rate_threshold_critical}"
}
silenced = "${var.http_5xx_errors_rate_silenced}"
notify_no_data = false
renotify_interval = 0
require_full_window = false
@ -33,7 +31,7 @@ resource "datadog_monitor" "function_http_5xx_errors_rate" {
resource "datadog_monitor" "function_high_connections_count" {
count = "${var.high_connections_count_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App connections count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.high_connections_count_message, var.message)}"
query = <<EOQ
@ -50,8 +48,6 @@ resource "datadog_monitor" "function_high_connections_count" {
critical = "${var.high_connections_count_threshold_critical}"
}
silenced = "${var.high_connections_count_silenced}"
notify_no_data = false
renotify_interval = 0
require_full_window = false
@ -64,7 +60,7 @@ resource "datadog_monitor" "function_high_connections_count" {
resource "datadog_monitor" "function_high_threads_count" {
count = "${var.high_threads_count_enabled == "true" ? 1 : 0}"
name = "${var.prefix_slug == "" ? "" : "[${var.prefix_slug}]"}[${var.environment}] Function App threads count too high {{#is_alert}}{{{comparator}}} {{threshold}} ({{value}}){{/is_alert}}{{#is_warning}}{{{comparator}}} {{warn_threshold}} ({{value}}){{/is_warning}}"
type = "metric alert"
type = "query alert"
message = "${coalesce(var.high_threads_count_message, var.message)}"
query = <<EOQ
@ -81,8 +77,6 @@ resource "datadog_monitor" "function_high_threads_count" {
critical = "${var.high_threads_count_threshold_critical}"
}
silenced = "${var.high_threads_count_silenced}"
notify_no_data = false
renotify_interval = 0
require_full_window = false

View File

@ -40,7 +40,6 @@ Creates DataDog monitors with the following checks:
| dropped\_d2c\_telemetry\_egress\_message | Custom message for IoT Hub dropped d2c telemetry monitor | string | `""` | no |
| dropped\_d2c\_telemetry\_egress\_rate\_threshold\_critical | D2C Telemetry Dropped limit (critical threshold) | string | `"90"` | no |
| dropped\_d2c\_telemetry\_egress\_rate\_threshold\_warning | D2C Telemetry Dropped limit (warning threshold) | string | `"50"` | no |
| dropped\_d2c\_telemetry\_egress\_silenced | Groups to mute for IoT Hub dropped d2c telemetry monitor | map | `{}` | no |
| dropped\_d2c\_telemetry\_egress\_time\_aggregator | Monitor aggregator for IoT Hub dropped d2c telemetry [available values: min, max, sum or avg] | string | `"min"` | no |
| dropped\_d2c\_telemetry\_egress\_timeframe | Monitor timeframe for IoT Hub dropped d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| environment | Architecture Environment | string | n/a | yes |
@ -48,7 +47,6 @@ Creates DataDog monitors with the following checks:
| failed\_c2d\_methods\_rate\_enabled | Flag to enable IoT Hub failed c2d methods monitor | string | `"true"` | no |
| failed\_c2d\_methods\_rate\_extra\_tags | Extra tags for IoT Hub failed c2d methods monitor | list | `[]` | no |
| failed\_c2d\_methods\_rate\_message | Custom message for IoT Hub failed c2d method monitor | string | `""` | no |
| failed\_c2d\_methods\_rate\_silenced | Groups to mute for IoT Hub failed c2d methods monitor | map | `{}` | no |
| failed\_c2d\_methods\_rate\_threshold\_critical | C2D Methods Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_c2d\_methods\_rate\_threshold\_warning | C2D Methods Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_c2d\_methods\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed c2d method [available values: min, max, sum or avg] | string | `"min"` | no |
@ -56,7 +54,6 @@ Creates DataDog monitors with the following checks:
| failed\_c2d\_twin\_read\_rate\_enabled | Flag to enable IoT Hub failed c2d twin read monitor | string | `"true"` | no |
| failed\_c2d\_twin\_read\_rate\_extra\_tags | Extra tags for IoT Hub failed c2d twin read monitor | list | `[]` | no |
| failed\_c2d\_twin\_read\_rate\_message | Custom message for IoT Hub failed c2d twin read monitor | string | `""` | no |
| failed\_c2d\_twin\_read\_rate\_silenced | Groups to mute for IoT Hub failed c2d twin read monitor | map | `{}` | no |
| failed\_c2d\_twin\_read\_rate\_threshold\_critical | C2D Twin Read Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_c2d\_twin\_read\_rate\_threshold\_warning | C2D Twin Read Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_c2d\_twin\_read\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed c2d twin read [available values: min, max, sum or avg] | string | `"min"` | no |
@ -64,7 +61,6 @@ Creates DataDog monitors with the following checks:
| failed\_c2d\_twin\_update\_rate\_enabled | Flag to enable IoT Hub failed c2d twin update monitor | string | `"true"` | no |
| failed\_c2d\_twin\_update\_rate\_extra\_tags | Extra tags for IoT Hub failed c2d twin update monitor | list | `[]` | no |
| failed\_c2d\_twin\_update\_rate\_message | Custom message for IoT Hub failed c2d twin update monitor | string | `""` | no |
| failed\_c2d\_twin\_update\_rate\_silenced | Groups to mute for IoT Hub failed c2d twin update monitor | map | `{}` | no |
| failed\_c2d\_twin\_update\_rate\_threshold\_critical | C2D Twin Update Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_c2d\_twin\_update\_rate\_threshold\_warning | C2D Twin Update Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_c2d\_twin\_update\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed c2d twin update [available values: min, max, sum or avg] | string | `"min"` | no |
@ -72,7 +68,6 @@ Creates DataDog monitors with the following checks:
| failed\_d2c\_twin\_read\_rate\_enabled | Flag to enable IoT Hub failed d2c twin read monitor | string | `"true"` | no |
| failed\_d2c\_twin\_read\_rate\_extra\_tags | Extra tags for IoT Hub failed d2c twin read monitor | list | `[]` | no |
| failed\_d2c\_twin\_read\_rate\_message | Custom message for IoT Hub failed d2c twin read monitor | string | `""` | no |
| failed\_d2c\_twin\_read\_rate\_silenced | Groups to mute for IoT Hub failed d2c twin read monitor | map | `{}` | no |
| failed\_d2c\_twin\_read\_rate\_threshold\_critical | D2C Twin Read Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_d2c\_twin\_read\_rate\_threshold\_warning | D2C Twin Read Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_d2c\_twin\_read\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed d2c twin read [available values: min, max, sum or avg] | string | `"min"` | no |
@ -80,7 +75,6 @@ Creates DataDog monitors with the following checks:
| failed\_d2c\_twin\_update\_rate\_enabled | Flag to enable IoT Hub failed d2c twin update monitor | string | `"true"` | no |
| failed\_d2c\_twin\_update\_rate\_extra\_tags | Extra tags for IoT Hub failed d2c twin update monitor | list | `[]` | no |
| failed\_d2c\_twin\_update\_rate\_message | Custom message for IoT Hub failed d2c twin update monitor | string | `""` | no |
| failed\_d2c\_twin\_update\_rate\_silenced | Groups to mute for IoT Hub failed d2c twin update monitor | map | `{}` | no |
| failed\_d2c\_twin\_update\_rate\_threshold\_critical | D2C Twin Update Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_d2c\_twin\_update\_rate\_threshold\_warning | D2C Twin Update Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_d2c\_twin\_update\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed d2c twin update [available values: min, max, sum or avg] | string | `"min"` | no |
@ -88,7 +82,6 @@ Creates DataDog monitors with the following checks:
| failed\_jobs\_rate\_enabled | Flag to enable IoT Hub failed jobs monitor | string | `"true"` | no |
| failed\_jobs\_rate\_extra\_tags | Extra tags for IoT Hub failed jobs monitor | list | `[]` | no |
| failed\_jobs\_rate\_message | Custom message for IoT Hub failed jobs monitor | string | `""` | no |
| failed\_jobs\_rate\_silenced | Groups to mute for IoT Hub failed jobs monitor | map | `{}` | no |
| failed\_jobs\_rate\_threshold\_critical | Jobs Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_jobs\_rate\_threshold\_warning | Jobs Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_jobs\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed jobs [available values: min, max, sum or avg] | string | `"min"` | no |
@ -96,7 +89,6 @@ Creates DataDog monitors with the following checks:
| failed\_listjobs\_rate\_enabled | Flag to enable IoT Hub failed list jobs monitor | string | `"true"` | no |
| failed\_listjobs\_rate\_extra\_tags | Extra tags for IoT Hub failed list jobs monitor | list | `[]` | no |
| failed\_listjobs\_rate\_message | Custom message for IoT Hub failed list jobs monitor | string | `""` | no |
| failed\_listjobs\_rate\_silenced | Groups to mute for IoT Hub failed list jobs monitor | map | `{}` | no |
| failed\_listjobs\_rate\_threshold\_critical | ListJobs Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_listjobs\_rate\_threshold\_warning | ListJobs Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_listjobs\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed list jobs [available values: min, max, sum or avg] | string | `"min"` | no |
@ -104,7 +96,6 @@ Creates DataDog monitors with the following checks:
| failed\_queryjobs\_rate\_enabled | Flag to enable IoT Hub failed query jobs monitor | string | `"true"` | no |
| failed\_queryjobs\_rate\_extra\_tags | Extra tags for IoT Hub failed query jobs monitor | list | `[]` | no |
| failed\_queryjobs\_rate\_message | Custom message for IoT Hub failed query jobs monitor | string | `""` | no |
| failed\_queryjobs\_rate\_silenced | Groups to mute for IoT Hub failed query jobs monitor | map | `{}` | no |
| failed\_queryjobs\_rate\_threshold\_critical | QueryJobs Failed rate limit (critical threshold) | string | `"90"` | no |
| failed\_queryjobs\_rate\_threshold\_warning | QueryJobs Failed rate limit (warning threshold) | string | `"50"` | no |
| failed\_queryjobs\_rate\_time\_aggregator | Monitor aggregator for IoT Hub failed query jobs [available values: min, max, sum or avg] | string | `"min"` | no |
@ -117,7 +108,6 @@ Creates DataDog monitors with the following checks:
| invalid\_d2c\_telemetry\_egress\_message | Custom message for IoT Hub invalid d2c telemetry monitor | string | `""` | no |
| invalid\_d2c\_telemetry\_egress\_rate\_threshold\_critical | D2C Telemetry Invalid limit (critical threshold) | string | `"90"` | no |
| invalid\_d2c\_telemetry\_egress\_rate\_threshold\_warning | D2C Telemetry Invalid limit (warning threshold) | string | `"50"` | no |
| invalid\_d2c\_telemetry\_egress\_silenced | Groups to mute for IoT Hub invalid d2c telemetry monitor | map | `{}` | no |
| invalid\_d2c\_telemetry\_egress\_time\_aggregator | Monitor aggregator for IoT Hub invalid d2c telemetry [available values: min, max, sum or avg] | string | `"min"` | no |
| invalid\_d2c\_telemetry\_egress\_timeframe | Monitor timeframe for IoT Hub invalid d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| message | Message sent when an alert is triggered | string | n/a | yes |
@ -127,25 +117,21 @@ Creates DataDog monitors with the following checks:
| orphaned\_d2c\_telemetry\_egress\_message | Custom message for IoT Hub orphaned d2c telemetry monitor | string | `""` | no |
| orphaned\_d2c\_telemetry\_egress\_rate\_threshold\_critical | D2C Telemetry Orphaned limit (critical threshold) | string | `"90"` | no |
| orphaned\_d2c\_telemetry\_egress\_rate\_threshold\_warning | D2C Telemetry Orphaned limit (warning threshold) | string | `"50"` | no |
| orphaned\_d2c\_telemetry\_egress\_silenced | Groups to mute for IoT Hub orphaned d2c telemetry monitor | map | `{}` | no |
| orphaned\_d2c\_telemetry\_egress\_time\_aggregator | Monitor aggregator for IoT Hub orphaned d2c telemetry [available values: min, max, sum or avg] | string | `"min"` | no |
| orphaned\_d2c\_telemetry\_egress\_timeframe | Monitor timeframe for IoT Hub orphaned d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |
| status\_enabled | Flag to enable IoT Hub status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for IoT Hub status monitor | list | `[]` | no |
| status\_message | Custom message for IoT Hub status monitor | string | `""` | no |
| status\_silenced | Groups to mute for IoT Hub status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for IoT Hub status [available values: min, max, sum or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for IoT Hub status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| too\_many\_d2c\_telemetry\_ingress\_nosent\_enabled | Flag to enable IoT Hub unsent d2c telemetry monitor | string | `"true"` | no |
| too\_many\_d2c\_telemetry\_ingress\_nosent\_extra\_tags | Extra tags for IoT Hub unsent d2c telemetry monitor | list | `[]` | no |
| too\_many\_d2c\_telemetry\_ingress\_nosent\_message | Custom message for IoT Hub unsent d2c telemetry monitor | string | `""` | no |
| too\_many\_d2c\_telemetry\_ingress\_nosent\_silenced | Groups to mute for IoT Hub unsent d2c telemetry monitor | map | `{}` | no |
| too\_many\_d2c\_telemetry\_ingress\_nosent\_timeframe | Monitor timeframe for IoT Hub unsent d2c telemetry [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| total\_devices\_enabled | Flag to enable IoT Hub total devices monitor | string | `"true"` | no |
| total\_devices\_extra\_tags | Extra tags for IoT Hub total devices monitor | list | `[]` | no |
| total\_devices\_message | Custom message for IoT Hub total devices monitor | string | `""` | no |
| total\_devices\_silenced | Groups to mute for IoT Hub total devices monitor | map | `{}` | no |
| total\_devices\_time\_aggregator | Monitor aggregator for IoT Hub total devices [available values: min, max, sum or avg] | string | `"min"` | no |
| total\_devices\_timeframe | Monitor timeframe for IoT Hub total devices [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# IOT Hub specific variables
variable "status_silenced" {
description = "Groups to mute for IoT Hub status monitor"
type = "map"
default = {}
}
variable "status_enabled" {
description = "Flag to enable IoT Hub status monitor"
@ -76,12 +71,6 @@ variable "status_timeframe" {
default = "last_5m"
}
variable "total_devices_silenced" {
description = "Groups to mute for IoT Hub total devices monitor"
type = "map"
default = {}
}
variable "total_devices_enabled" {
description = "Flag to enable IoT Hub total devices monitor"
type = "string"
@ -112,12 +101,6 @@ variable "total_devices_timeframe" {
default = "last_5m"
}
variable "too_many_d2c_telemetry_ingress_nosent_silenced" {
description = "Groups to mute for IoT Hub unsent d2c telemetry monitor"
type = "map"
default = {}
}
variable "too_many_d2c_telemetry_ingress_nosent_enabled" {
description = "Flag to enable IoT Hub unsent d2c telemetry monitor"
type = "string"
@ -142,12 +125,6 @@ variable "too_many_d2c_telemetry_ingress_nosent_timeframe" {
default = "last_5m"
}
variable "failed_jobs_rate_silenced" {
description = "Groups to mute for IoT Hub failed jobs monitor"
type = "map"
default = {}
}
variable "failed_jobs_rate_enabled" {
description = "Flag to enable IoT Hub failed jobs monitor"
type = "string"
@ -188,12 +165,6 @@ variable "failed_jobs_rate_threshold_critical" {
default = 90
}
variable "failed_listjobs_rate_silenced" {
description = "Groups to mute for IoT Hub failed list jobs monitor"
type = "map"
default = {}
}
variable "failed_listjobs_rate_enabled" {
description = "Flag to enable IoT Hub failed list jobs monitor"
type = "string"
@ -234,12 +205,6 @@ variable "failed_listjobs_rate_threshold_critical" {
default = 90
}
variable "failed_queryjobs_rate_silenced" {
description = "Groups to mute for IoT Hub failed query jobs monitor"
type = "map"
default = {}
}
variable "failed_queryjobs_rate_enabled" {
description = "Flag to enable IoT Hub failed query jobs monitor"
type = "string"
@ -280,12 +245,6 @@ variable "failed_queryjobs_rate_threshold_critical" {
default = 90
}
variable "failed_c2d_methods_rate_silenced" {
description = "Groups to mute for IoT Hub failed c2d methods monitor"
type = "map"
default = {}
}
variable "failed_c2d_methods_rate_enabled" {
description = "Flag to enable IoT Hub failed c2d methods monitor"
type = "string"
@ -326,12 +285,6 @@ variable "failed_c2d_methods_rate_threshold_critical" {
default = 90
}
variable "failed_c2d_twin_read_rate_silenced" {
description = "Groups to mute for IoT Hub failed c2d twin read monitor"
type = "map"
default = {}
}
variable "failed_c2d_twin_read_rate_enabled" {
description = "Flag to enable IoT Hub failed c2d twin read monitor"
type = "string"
@ -372,12 +325,6 @@ variable "failed_c2d_twin_read_rate_threshold_critical" {
default = 90
}
variable "failed_c2d_twin_update_rate_silenced" {
description = "Groups to mute for IoT Hub failed c2d twin update monitor"
type = "map"
default = {}
}
variable "failed_c2d_twin_update_rate_enabled" {
description = "Flag to enable IoT Hub failed c2d twin update monitor"
type = "string"
@ -418,12 +365,6 @@ variable "failed_c2d_twin_update_rate_threshold_critical" {
default = 90
}
variable "failed_d2c_twin_read_rate_silenced" {
description = "Groups to mute for IoT Hub failed d2c twin read monitor"
type = "map"
default = {}
}
variable "failed_d2c_twin_read_rate_enabled" {
description = "Flag to enable IoT Hub failed d2c twin read monitor"
type = "string"
@ -464,12 +405,6 @@ variable "failed_d2c_twin_read_rate_threshold_critical" {
default = 90
}
variable "failed_d2c_twin_update_rate_silenced" {
description = "Groups to mute for IoT Hub failed d2c twin update monitor"
type = "map"
default = {}
}
variable "failed_d2c_twin_update_rate_enabled" {
description = "Flag to enable IoT Hub failed d2c twin update monitor"
type = "string"
@ -510,12 +445,6 @@ variable "failed_d2c_twin_update_rate_threshold_critical" {
default = 90
}
variable "dropped_d2c_telemetry_egress_silenced" {
description = "Groups to mute for IoT Hub dropped d2c telemetry monitor"
type = "map"
default = {}
}
variable "dropped_d2c_telemetry_egress_enabled" {
description = "Flag to enable IoT Hub dropped d2c telemetry monitor"
type = "string"
@ -556,12 +485,6 @@ variable "dropped_d2c_telemetry_egress_rate_threshold_critical" {
default = 90
}
variable "orphaned_d2c_telemetry_egress_silenced" {
description = "Groups to mute for IoT Hub orphaned d2c telemetry monitor"
type = "map"
default = {}
}
variable "orphaned_d2c_telemetry_egress_enabled" {
description = "Flag to enable IoT Hub orphaned d2c telemetry monitor"
type = "string"
@ -602,12 +525,6 @@ variable "orphaned_d2c_telemetry_egress_rate_threshold_critical" {
default = 90
}
variable "invalid_d2c_telemetry_egress_silenced" {
description = "Groups to mute for IoT Hub invalid d2c telemetry monitor"
type = "map"
default = {}
}
variable "invalid_d2c_telemetry_egress_enabled" {
description = "Flag to enable IoT Hub invalid d2c telemetry monitor"
type = "string"

View File

@ -12,15 +12,13 @@ resource "datadog_monitor" "too_many_jobs_failed" {
* 100 , 0) > ${var.failed_jobs_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_jobs_rate_threshold_warning}"
critical = "${var.failed_jobs_rate_threshold_critical}"
}
silenced = "${var.failed_jobs_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -48,15 +46,13 @@ resource "datadog_monitor" "too_many_list_jobs_failed" {
* 100, 0) > ${var.failed_listjobs_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_listjobs_rate_threshold_warning}"
critical = "${var.failed_listjobs_rate_threshold_critical}"
}
silenced = "${var.failed_listjobs_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -84,15 +80,13 @@ resource "datadog_monitor" "too_many_query_jobs_failed" {
* 100, 0) > ${var.failed_queryjobs_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_queryjobs_rate_threshold_warning}"
critical = "${var.failed_queryjobs_rate_threshold_critical}"
}
silenced = "${var.failed_queryjobs_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -117,9 +111,7 @@ resource "datadog_monitor" "status" {
) < 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -145,9 +137,7 @@ resource "datadog_monitor" "total_devices" {
) == 0
EOQ
type = "metric alert"
silenced = "${var.total_devices_silenced}"
type = "query alert"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
@ -176,15 +166,13 @@ resource "datadog_monitor" "too_many_c2d_methods_failed" {
* 100, 0) > ${var.failed_c2d_methods_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_c2d_methods_rate_threshold_warning}"
critical = "${var.failed_c2d_methods_rate_threshold_critical}"
}
silenced = "${var.failed_c2d_methods_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -212,15 +200,13 @@ resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
* 100, 0) > ${var.failed_c2d_twin_read_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_c2d_twin_read_rate_threshold_warning}"
critical = "${var.failed_c2d_twin_read_rate_threshold_critical}"
}
silenced = "${var.failed_c2d_twin_read_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -248,15 +234,13 @@ resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
* 100, 0) > ${var.failed_c2d_twin_update_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_c2d_twin_update_rate_threshold_warning}"
critical = "${var.failed_c2d_twin_update_rate_threshold_critical}"
}
silenced = "${var.failed_c2d_twin_update_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -284,15 +268,13 @@ resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
* 100, 0) > ${var.failed_d2c_twin_read_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_d2c_twin_read_rate_threshold_warning}"
critical = "${var.failed_d2c_twin_read_rate_threshold_critical}"
}
silenced = "${var.failed_d2c_twin_read_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -320,15 +302,13 @@ resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
* 100, 0) > ${var.failed_d2c_twin_update_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.failed_d2c_twin_update_rate_threshold_warning}"
critical = "${var.failed_d2c_twin_update_rate_threshold_critical}"
}
silenced = "${var.failed_d2c_twin_update_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -358,15 +338,13 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_dropped" {
* 100, 0) > ${var.dropped_d2c_telemetry_egress_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.dropped_d2c_telemetry_egress_rate_threshold_warning}"
critical = "${var.dropped_d2c_telemetry_egress_rate_threshold_critical}"
}
silenced = "${var.dropped_d2c_telemetry_egress_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -396,15 +374,13 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_orphaned" {
* 100, 0) > ${var.orphaned_d2c_telemetry_egress_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.orphaned_d2c_telemetry_egress_rate_threshold_warning}"
critical = "${var.orphaned_d2c_telemetry_egress_rate_threshold_critical}"
}
silenced = "${var.orphaned_d2c_telemetry_egress_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -434,15 +410,13 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_invalid" {
* 100, 0) > ${var.invalid_d2c_telemetry_egress_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.invalid_d2c_telemetry_egress_rate_threshold_warning}"
critical = "${var.invalid_d2c_telemetry_egress_rate_threshold_critical}"
}
silenced = "${var.invalid_d2c_telemetry_egress_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -469,9 +443,7 @@ resource "datadog_monitor" "too_many_d2c_telemetry_ingress_nosent" {
, 0) > 0
EOQ
type = "metric alert"
silenced = "${var.too_many_d2c_telemetry_ingress_nosent_silenced}"
type = "query alert"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"

View File

@ -27,7 +27,6 @@ Creates DataDog monitors with the following checks:
| api\_latency\_enabled | Flag to enable Key Vault API latency monitor | string | `"true"` | no |
| api\_latency\_extra\_tags | Extra tags for Key Vault API latency monitor | list | `[]` | no |
| api\_latency\_message | Custom message for Key Vault API latency monitor | string | `""` | no |
| api\_latency\_silenced | Groups to mute for Key Vault API latency monitor | map | `{}` | no |
| api\_latency\_threshold\_critical | Critical threshold for Key Vault API latency rate | string | `"100"` | no |
| api\_latency\_threshold\_warning | Warning threshold for Key Vault API latency rate | string | `"80"` | no |
| api\_latency\_time\_aggregator | Monitor aggregator for Key Vault API latency [available values: min, max or avg] | string | `"min"` | no |
@ -35,7 +34,6 @@ Creates DataDog monitors with the following checks:
| api\_result\_enabled | Flag to enable Key Vault API result monitor | string | `"true"` | no |
| api\_result\_extra\_tags | Extra tags for Key Vault API result monitor | list | `[]` | no |
| api\_result\_message | Custom message for Key Vault API result monitor | string | `""` | no |
| api\_result\_silenced | Groups to mute for Key Vault API result monitor | map | `{}` | no |
| api\_result\_threshold\_critical | Critical threshold for Key Vault API result rate | string | `"10"` | no |
| api\_result\_threshold\_warning | Warning threshold for Key Vault API result rate | string | `"30"` | no |
| api\_result\_time\_aggregator | Monitor aggregator for Key Vault API result [available values: min, max or avg] | string | `"max"` | no |
@ -51,7 +49,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Key Vault status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Key Vault status monitor | list | `[]` | no |
| status\_message | Custom message for Key Vault status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Key Vault status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Key Vault status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Key Vault status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -44,12 +44,6 @@ variable "status_enabled" {
default = "true"
}
variable "status_silenced" {
description = "Groups to mute for Key Vault status monitor"
type = "map"
default = {}
}
variable "status_message" {
description = "Custom message for Key Vault status monitor"
type = "string"
@ -79,12 +73,6 @@ variable "api_result_enabled" {
default = "true"
}
variable "api_result_silenced" {
description = "Groups to mute for Key Vault API result monitor"
type = "map"
default = {}
}
variable "api_result_message" {
description = "Custom message for Key Vault API result monitor"
type = "string"
@ -124,12 +112,6 @@ variable "api_latency_enabled" {
default = "true"
}
variable "api_latency_silenced" {
description = "Groups to mute for Key Vault API latency monitor"
type = "map"
default = {}
}
variable "api_latency_message" {
description = "Custom message for Key Vault API latency monitor"
type = "string"

View File

@ -10,9 +10,7 @@ resource "datadog_monitor" "keyvault_status" {
) < 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -47,9 +45,7 @@ resource "datadog_monitor" "keyvault_api_result" {
warning = "${var.api_result_threshold_warning}"
}
type = "metric alert"
silenced = "${var.api_result_silenced}"
type = "query alert"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
@ -83,8 +79,6 @@ resource "datadog_monitor" "keyvault_api_latency" {
type = "metric alert"
silenced = "${var.api_latency_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -33,7 +33,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Load Balancer status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Load Balancer status monitor | list | `[]` | no |
| status\_message | Custom message for Load Balancer status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Load Balancer status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Load Balancer status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Load Balancer status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -44,12 +44,6 @@ variable "status_enabled" {
default = "true"
}
variable "status_silenced" {
description = "Groups to mute for Load Balancer status monitor"
type = "map"
default = {}
}
variable "status_message" {
description = "Custom message for Load Balancer status monitor"
type = "string"

View File

@ -10,9 +10,7 @@ resource "datadog_monitor" "loadbalancer_status" {
) < 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"

View File

@ -28,7 +28,6 @@ Creates DataDog monitors with the following checks:
| cpu\_usage\_enabled | Flag to enable Mysql status monitor | string | `"true"` | no |
| cpu\_usage\_extra\_tags | Extra tags for Mysql status monitor | list | `[]` | no |
| cpu\_usage\_message | Custom message for Mysql CPU monitor | string | `""` | no |
| cpu\_usage\_silenced | Groups to mute for Mysql CPU monitor | map | `{}` | no |
| cpu\_usage\_threshold\_critical | Mysql CPU usage in percent (critical threshold) | string | `"90"` | no |
| cpu\_usage\_threshold\_warning | Mysql CPU usage in percent (warning threshold) | string | `"80"` | no |
| cpu\_usage\_time\_aggregator | Monitor aggregator for Mysql CPU [available values: min, max or avg] | string | `"min"` | no |
@ -40,7 +39,6 @@ Creates DataDog monitors with the following checks:
| free\_storage\_enabled | Flag to enable Mysql status monitor | string | `"true"` | no |
| free\_storage\_extra\_tags | Extra tags for Mysql status monitor | list | `[]` | no |
| free\_storage\_message | Custom message for Mysql Free Storage monitor | string | `""` | no |
| free\_storage\_silenced | Groups to mute for Mysql Free Storage monitor | map | `{}` | no |
| free\_storage\_threshold\_critical | Mysql Free Storage remaining in percent (critical threshold) | string | `"10"` | no |
| free\_storage\_threshold\_warning | Mysql Free Storage remaining in percent (warning threshold) | string | `"20"` | no |
| free\_storage\_time\_aggregator | Monitor aggregator for Mysql Free Storage [available values: min, max or avg] | string | `"min"` | no |
@ -48,7 +46,6 @@ Creates DataDog monitors with the following checks:
| io\_consumption\_enabled | Flag to enable Mysql status monitor | string | `"true"` | no |
| io\_consumption\_extra\_tags | Extra tags for Mysql status monitor | list | `[]` | no |
| io\_consumption\_message | Custom message for Mysql IO consumption monitor | string | `""` | no |
| io\_consumption\_silenced | Groups to mute for Mysql IO consumption monitor | map | `{}` | no |
| io\_consumption\_threshold\_critical | Mysql IO consumption in percent (critical threshold) | string | `"90"` | no |
| io\_consumption\_threshold\_warning | Mysql IO consumption in percent (warning threshold) | string | `"80"` | no |
| io\_consumption\_time\_aggregator | Monitor aggregator for Mysql IO consumption [available values: min, max or avg] | string | `"min"` | no |
@ -56,7 +53,6 @@ Creates DataDog monitors with the following checks:
| memory\_usage\_enabled | Flag to enable Mysql status monitor | string | `"true"` | no |
| memory\_usage\_extra\_tags | Extra tags for Mysql status monitor | list | `[]` | no |
| memory\_usage\_message | Custom message for Mysql memory monitor | string | `""` | no |
| memory\_usage\_silenced | Groups to mute for Mysql memory monitor | map | `{}` | no |
| memory\_usage\_threshold\_critical | Mysql memory usage in percent (critical threshold) | string | `"90"` | no |
| memory\_usage\_threshold\_warning | Mysql memory usage in percent (warning threshold) | string | `"80"` | no |
| memory\_usage\_time\_aggregator | Monitor aggregator for Mysql memory [available values: min, max or avg] | string | `"min"` | no |

View File

@ -35,11 +35,6 @@ variable "filter_tags_custom" {
}
# Azure Databases for MySQL Servers specific variables
variable "cpu_usage_silenced" {
description = "Groups to mute for Mysql CPU monitor"
type = "map"
default = {}
}
variable "cpu_usage_enabled" {
description = "Flag to enable Mysql status monitor"
@ -81,12 +76,6 @@ variable "cpu_usage_threshold_critical" {
default = "90"
}
variable "free_storage_silenced" {
description = "Groups to mute for Mysql Free Storage monitor"
type = "map"
default = {}
}
variable "free_storage_enabled" {
description = "Flag to enable Mysql status monitor"
type = "string"
@ -127,12 +116,6 @@ variable "free_storage_threshold_critical" {
default = "10"
}
variable "io_consumption_silenced" {
description = "Groups to mute for Mysql IO consumption monitor"
type = "map"
default = {}
}
variable "io_consumption_enabled" {
description = "Flag to enable Mysql status monitor"
type = "string"
@ -173,12 +156,6 @@ variable "io_consumption_threshold_critical" {
default = "90"
}
variable "memory_usage_silenced" {
description = "Groups to mute for Mysql memory monitor"
type = "map"
default = {}
}
variable "memory_usage_enabled" {
description = "Flag to enable Mysql status monitor"
type = "string"

View File

@ -9,15 +9,13 @@ resource "datadog_monitor" "mysql_cpu_usage" {
) > ${var.cpu_usage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.cpu_usage_threshold_critical}"
warning = "${var.cpu_usage_threshold_warning}"
}
silenced = "${var.cpu_usage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -42,15 +40,13 @@ resource "datadog_monitor" "mysql_free_storage" {
) < ${var.free_storage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.free_storage_threshold_critical}"
warning = "${var.free_storage_threshold_warning}"
}
silenced = "${var.free_storage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -75,15 +71,13 @@ resource "datadog_monitor" "mysql_io_consumption" {
) > ${var.io_consumption_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.io_consumption_threshold_critical}"
warning = "${var.io_consumption_threshold_warning}"
}
silenced = "${var.io_consumption_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -108,15 +102,13 @@ resource "datadog_monitor" "mysql_memory_usage" {
) > ${var.memory_usage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.memory_usage_threshold_critical}"
warning = "${var.memory_usage_threshold_warning}"
}
silenced = "${var.memory_usage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -29,7 +29,6 @@ Creates DataDog monitors with the following checks:
| cpu\_usage\_enabled | Flag to enable PostgreSQL status monitor | string | `"true"` | no |
| cpu\_usage\_extra\_tags | Extra tags for PostgreSQL status monitor | list | `[]` | no |
| cpu\_usage\_message | Custom message for PostgreSQL CPU monitor | string | `""` | no |
| cpu\_usage\_silenced | Groups to mute for PostgreSQL CPU monitor | map | `{}` | no |
| cpu\_usage\_threshold\_critical | PostgreSQL CPU usage in percent (critical threshold) | string | `"90"` | no |
| cpu\_usage\_threshold\_warning | PostgreSQL CPU usage in percent (warning threshold) | string | `"80"` | no |
| cpu\_usage\_time\_aggregator | Monitor aggregator for PostgreSQL CPU [available values: min, max or avg] | string | `"min"` | no |
@ -41,7 +40,6 @@ Creates DataDog monitors with the following checks:
| free\_storage\_enabled | Flag to enable PostgreSQL status monitor | string | `"true"` | no |
| free\_storage\_extra\_tags | Extra tags for PostgreSQL status monitor | list | `[]` | no |
| free\_storage\_message | Custom message for PostgreSQL Free Storage monitor | string | `""` | no |
| free\_storage\_silenced | Groups to mute for PostgreSQL Free Storage monitor | map | `{}` | no |
| free\_storage\_threshold\_critical | PostgreSQL Free Storage remaining in percent (critical threshold) | string | `"10"` | no |
| free\_storage\_threshold\_warning | PostgreSQL Free Storage remaining in percent (warning threshold) | string | `"20"` | no |
| free\_storage\_time\_aggregator | Monitor aggregator for PostgreSQL Free Storage [available values: min, max or avg] | string | `"min"` | no |
@ -49,7 +47,6 @@ Creates DataDog monitors with the following checks:
| io\_consumption\_enabled | Flag to enable PostgreSQL status monitor | string | `"true"` | no |
| io\_consumption\_extra\_tags | Extra tags for PostgreSQL status monitor | list | `[]` | no |
| io\_consumption\_message | Custom message for PostgreSQL IO consumption monitor | string | `""` | no |
| io\_consumption\_silenced | Groups to mute for PostgreSQL IO consumption monitor | map | `{}` | no |
| io\_consumption\_threshold\_critical | PostgreSQL IO consumption in percent (critical threshold) | string | `"90"` | no |
| io\_consumption\_threshold\_warning | PostgreSQL IO consumption in percent (warning threshold) | string | `"80"` | no |
| io\_consumption\_time\_aggregator | Monitor aggregator for PostgreSQL IO consumption [available values: min, max or avg] | string | `"min"` | no |
@ -57,7 +54,6 @@ Creates DataDog monitors with the following checks:
| memory\_usage\_enabled | Flag to enable PostgreSQL status monitor | string | `"true"` | no |
| memory\_usage\_extra\_tags | Extra tags for PostgreSQL status monitor | list | `[]` | no |
| memory\_usage\_message | Custom message for PostgreSQL memory monitor | string | `""` | no |
| memory\_usage\_silenced | Groups to mute for PostgreSQL memory monitor | map | `{}` | no |
| memory\_usage\_threshold\_critical | PostgreSQL memory usage in percent (critical threshold) | string | `"90"` | no |
| memory\_usage\_threshold\_warning | PostgreSQL memory usage in percent (warning threshold) | string | `"80"` | no |
| memory\_usage\_time\_aggregator | Monitor aggregator for PostgreSQL memory [available values: min, max or avg] | string | `"min"` | no |
@ -67,7 +63,6 @@ Creates DataDog monitors with the following checks:
| no\_connection\_enabled | Flag to enable PostgreSQL status monitor | string | `"true"` | no |
| no\_connection\_extra\_tags | Extra tags for PostgreSQL status monitor | list | `[]` | no |
| no\_connection\_message | Custom message for PostgreSQL no connection monitor | string | `""` | no |
| no\_connection\_silenced | Groups to mute for PostgreSQL no connection monitor | map | `{}` | no |
| no\_connection\_time\_aggregator | Monitor aggregator for PostgreSQL no connection [available values: min, max or avg] | string | `"min"` | no |
| no\_connection\_timeframe | Monitor timeframe for PostgreSQL no connection [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |
| prefix\_slug | Prefix string to prepend between brackets on every monitors names | string | `""` | no |

View File

@ -35,11 +35,6 @@ variable "filter_tags_custom" {
}
# Azure Databases for PostgreSQL Servers specific variables
variable "cpu_usage_silenced" {
description = "Groups to mute for PostgreSQL CPU monitor"
type = "map"
default = {}
}
variable "cpu_usage_enabled" {
description = "Flag to enable PostgreSQL status monitor"
@ -81,12 +76,6 @@ variable "cpu_usage_threshold_critical" {
default = "90"
}
variable "no_connection_silenced" {
description = "Groups to mute for PostgreSQL no connection monitor"
type = "map"
default = {}
}
variable "no_connection_enabled" {
description = "Flag to enable PostgreSQL status monitor"
type = "string"
@ -117,12 +106,6 @@ variable "no_connection_timeframe" {
default = "last_5m"
}
variable "free_storage_silenced" {
description = "Groups to mute for PostgreSQL Free Storage monitor"
type = "map"
default = {}
}
variable "free_storage_enabled" {
description = "Flag to enable PostgreSQL status monitor"
type = "string"
@ -163,12 +146,6 @@ variable "free_storage_threshold_critical" {
default = "10"
}
variable "io_consumption_silenced" {
description = "Groups to mute for PostgreSQL IO consumption monitor"
type = "map"
default = {}
}
variable "io_consumption_enabled" {
description = "Flag to enable PostgreSQL status monitor"
type = "string"
@ -209,12 +186,6 @@ variable "io_consumption_threshold_critical" {
default = "90"
}
variable "memory_usage_silenced" {
description = "Groups to mute for PostgreSQL memory monitor"
type = "map"
default = {}
}
variable "memory_usage_enabled" {
description = "Flag to enable PostgreSQL status monitor"
type = "string"

View File

@ -9,15 +9,13 @@ resource "datadog_monitor" "postgresql_cpu_usage" {
) > ${var.cpu_usage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.cpu_usage_threshold_critical}"
warning = "${var.cpu_usage_threshold_warning}"
}
silenced = "${var.cpu_usage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -42,9 +40,7 @@ resource "datadog_monitor" "postgresql_no_connection" {
) < 1
EOQ
type = "metric alert"
silenced = "${var.no_connection_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -70,15 +66,13 @@ resource "datadog_monitor" "postgresql_free_storage" {
) < ${var.free_storage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.free_storage_threshold_critical}"
warning = "${var.free_storage_threshold_warning}"
}
silenced = "${var.free_storage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -103,15 +97,13 @@ resource "datadog_monitor" "postgresql_io_consumption" {
) > ${var.io_consumption_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.io_consumption_threshold_critical}"
warning = "${var.io_consumption_threshold_warning}"
}
silenced = "${var.io_consumption_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -136,15 +128,13 @@ resource "datadog_monitor" "postgresql_memory_usage" {
) > ${var.memory_usage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
critical = "${var.memory_usage_threshold_critical}"
warning = "${var.memory_usage_threshold_warning}"
}
silenced = "${var.memory_usage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -30,7 +30,6 @@ Creates DataDog monitors with the following checks:
| evictedkeys\_limit\_enabled | Flag to enable Redis evicted keys monitor | string | `"true"` | no |
| evictedkeys\_limit\_extra\_tags | Extra tags for Redis evicted keys monitor | list | `[]` | no |
| evictedkeys\_limit\_message | Custom message for Redis evicted keys monitor | string | `""` | no |
| evictedkeys\_limit\_silenced | Groups to mute for Redis evicted keys monitor | map | `{}` | no |
| evictedkeys\_limit\_threshold\_critical | Evicted keys limit (critical threshold) | string | `"100"` | no |
| evictedkeys\_limit\_threshold\_warning | Evicted keys limit (warning threshold) | string | `"0"` | no |
| evictedkeys\_limit\_time\_aggregator | Monitor aggregator for Redis evicted keys [available values: min, max or avg] | string | `"avg"` | no |
@ -43,7 +42,6 @@ Creates DataDog monitors with the following checks:
| percent\_processor\_time\_enabled | Flag to enable Redis processor monitor | string | `"true"` | no |
| percent\_processor\_time\_extra\_tags | Extra tags for Redis processor monitor | list | `[]` | no |
| percent\_processor\_time\_message | Custom message for Redis processor monitor | string | `""` | no |
| percent\_processor\_time\_silenced | Groups to mute for Redis processor monitor | map | `{}` | no |
| percent\_processor\_time\_threshold\_critical | Processor time percent (critical threshold) | string | `"80"` | no |
| percent\_processor\_time\_threshold\_warning | Processor time percent (warning threshold) | string | `"60"` | no |
| percent\_processor\_time\_time\_aggregator | Monitor aggregator for Redis processor [available values: min, max or avg] | string | `"min"` | no |
@ -52,7 +50,6 @@ Creates DataDog monitors with the following checks:
| server\_load\_rate\_enabled | Flag to enable Redis server load monitor | string | `"true"` | no |
| server\_load\_rate\_extra\_tags | Extra tags for Redis server load monitor | list | `[]` | no |
| server\_load\_rate\_message | Custom message for Redis server load monitor | string | `""` | no |
| server\_load\_rate\_silenced | Groups to mute for Redis server load monitor | map | `{}` | no |
| server\_load\_rate\_threshold\_critical | Server CPU load rate (critical threshold) | string | `"90"` | no |
| server\_load\_rate\_threshold\_warning | Server CPU load rate (warning threshold) | string | `"70"` | no |
| server\_load\_rate\_time\_aggregator | Monitor aggregator for Redis server load [available values: min, max or avg] | string | `"min"` | no |
@ -60,7 +57,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable Redis status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for Redis status monitor | list | `[]` | no |
| status\_message | Custom message for Redis status monitor | string | `""` | no |
| status\_silenced | Groups to mute for Redis status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for Redis status [available values: min, max or avg] | string | `"max"` | no |
| status\_timeframe | Monitor timeframe for Redis status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -40,11 +40,6 @@ variable "filter_tags_custom_excluded" {
}
# Azure Redis specific variables
variable "status_silenced" {
description = "Groups to mute for Redis status monitor"
type = "map"
default = {}
}
variable "status_enabled" {
description = "Flag to enable Redis status monitor"
@ -76,12 +71,6 @@ variable "status_timeframe" {
default = "last_5m"
}
variable "evictedkeys_limit_silenced" {
description = "Groups to mute for Redis evicted keys monitor"
type = "map"
default = {}
}
variable "evictedkeys_limit_enabled" {
description = "Flag to enable Redis evicted keys monitor"
type = "string"
@ -122,12 +111,6 @@ variable "evictedkeys_limit_threshold_critical" {
default = 100
}
variable "percent_processor_time_silenced" {
description = "Groups to mute for Redis processor monitor"
type = "map"
default = {}
}
variable "percent_processor_time_enabled" {
description = "Flag to enable Redis processor monitor"
type = "string"
@ -168,12 +151,6 @@ variable "percent_processor_time_threshold_warning" {
default = 60
}
variable "server_load_rate_silenced" {
description = "Groups to mute for Redis server load monitor"
type = "map"
default = {}
}
variable "server_load_rate_enabled" {
description = "Flag to enable Redis server load monitor"
type = "string"

View File

@ -9,9 +9,7 @@ resource "datadog_monitor" "status" {
) != 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -37,15 +35,13 @@ resource "datadog_monitor" "evictedkeys" {
) > ${var.evictedkeys_limit_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.evictedkeys_limit_threshold_warning}"
critical = "${var.evictedkeys_limit_threshold_critical}"
}
silenced = "${var.evictedkeys_limit_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -70,15 +66,13 @@ resource "datadog_monitor" "percent_processor_time" {
) > ${var.percent_processor_time_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.percent_processor_time_threshold_warning}"
critical = "${var.percent_processor_time_threshold_critical}"
}
silenced = "${var.percent_processor_time_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -103,15 +97,13 @@ resource "datadog_monitor" "server_load" {
) > ${var.server_load_rate_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.server_load_rate_threshold_warning}"
critical = "${var.server_load_rate_threshold_critical}"
}
silenced = "${var.server_load_rate_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

View File

@ -27,7 +27,6 @@ Creates DataDog monitors with the following checks:
| cpu\_percentage\_enabled | Flag to enable the serverfarms cpu_percentage monitor | string | `"true"` | no |
| cpu\_percentage\_extra\_tags | Extra tags for serverfarms cpu_percentage monitor | list | `[]` | no |
| cpu\_percentage\_message | Custom message for serverfarm cpu_percentage monitor | string | `""` | no |
| cpu\_percentage\_silenced | Groups to mute for serverfarm cpu_percentage monitor | map | `{}` | no |
| cpu\_percentage\_threshold\_critical | CPU percentage (critical threshold) | string | `"95"` | no |
| cpu\_percentage\_threshold\_warning | CPU percentage (warning threshold) | string | `"90"` | no |
| cpu\_percentage\_time\_aggregator | Monitor aggregator for serverfarms cpu_percentage [available values: min, max or avg] | string | `"min"` | no |
@ -39,7 +38,6 @@ Creates DataDog monitors with the following checks:
| memory\_percentage\_enabled | Flag to enable the serverfarms memory_percentage monitor | string | `"true"` | no |
| memory\_percentage\_extra\_tags | Extra tags for serverfarms memory_percentage monitor | list | `[]` | no |
| memory\_percentage\_message | Custom message for serverfarm memory_percentage monitor | string | `""` | no |
| memory\_percentage\_silenced | Groups to mute for serverfarm memory_percentage monitor | map | `{}` | no |
| memory\_percentage\_threshold\_critical | Memory percentage (critical threshold) | string | `"95"` | no |
| memory\_percentage\_threshold\_warning | Memory percentage (warning threshold) | string | `"90"` | no |
| memory\_percentage\_time\_aggregator | Monitor aggregator for serverfarms memory_percentage [available values: min, max or avg] | string | `"min"` | no |
@ -50,7 +48,6 @@ Creates DataDog monitors with the following checks:
| status\_enabled | Flag to enable the serverfarms status monitor | string | `"true"` | no |
| status\_extra\_tags | Extra tags for serverfarms status monitor | list | `[]` | no |
| status\_message | Custom message for serverfarm status monitor | string | `""` | no |
| status\_silenced | Groups to mute for serverfarm status monitor | map | `{}` | no |
| status\_time\_aggregator | Monitor aggregator for serverfarms status [available values: min, max or avg] | string | `"min"` | no |
| status\_timeframe | Monitor timeframe for serverfarms status [available values: `last_#m` (1, 5, 10, 15, or 30), `last_#h` (1, 2, or 4), or `last_1d`] | string | `"last_5m"` | no |

View File

@ -49,12 +49,6 @@ variable "status_message" {
default = ""
}
variable "status_silenced" {
type = "map"
description = "Groups to mute for serverfarm status monitor"
default = {}
}
variable "status_extra_tags" {
description = "Extra tags for serverfarms status monitor"
type = "list"
@ -86,12 +80,6 @@ variable "cpu_percentage_message" {
default = ""
}
variable "cpu_percentage_silenced" {
type = "map"
description = "Groups to mute for serverfarm cpu_percentage monitor"
default = {}
}
variable "cpu_percentage_extra_tags" {
description = "Extra tags for serverfarms cpu_percentage monitor"
type = "list"
@ -133,12 +121,6 @@ variable "memory_percentage_message" {
default = ""
}
variable "memory_percentage_silenced" {
type = "map"
description = "Groups to mute for serverfarm memory_percentage monitor"
default = {}
}
variable "memory_percentage_extra_tags" {
description = "Extra tags for serverfarms memory_percentage monitor"
type = "list"

View File

@ -9,9 +9,7 @@ resource "datadog_monitor" "status" {
) != 1
EOQ
type = "metric alert"
silenced = "${var.status_silenced}"
type = "query alert"
notify_no_data = true
evaluation_delay = "${var.evaluation_delay}"
@ -37,15 +35,13 @@ resource "datadog_monitor" "cpu_percentage" {
) > ${var.cpu_percentage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.cpu_percentage_threshold_warning}"
critical = "${var.cpu_percentage_threshold_critical}"
}
silenced = "${var.cpu_percentage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
@ -70,15 +66,13 @@ resource "datadog_monitor" "memory_percentage" {
) > ${var.memory_percentage_threshold_critical}
EOQ
type = "metric alert"
type = "query alert"
thresholds {
warning = "${var.memory_percentage_threshold_warning}"
critical = "${var.memory_percentage_threshold_critical}"
}
silenced = "${var.memory_percentage_silenced}"
notify_no_data = false
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0

Some files were not shown because too many files have changed in this diff Show More