MON-43 add ELB monitors
This commit is contained in:
parent
e6530c5943
commit
164b8b5f5b
23
inputs.tf
23
inputs.tf
@ -60,8 +60,21 @@ variable "elb_config" {
|
||||
delay = 900
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Alert thresholds for the ELB backend 5xx error-rate monitor,
# expressed as a percentage of total requests over the evaluation window.
variable "elb_5xx_threshold" {
  description = "Warning/critical thresholds (percent of requests) for the ELB backend 5xx monitor"
  type        = "map"

  default = {
    warning  = 5
    critical = 10
  }
}
|
||||
# Alert thresholds for the ELB backend 4xx error-rate monitor,
# expressed as a percentage of total requests over the evaluation window.
variable "elb_4xx_threshold" {
  description = "Warning/critical thresholds (percent of requests) for the ELB backend 4xx monitor"
  type        = "map"

  default = {
    warning  = 5
    critical = 10
  }
}
|
||||
# Alert thresholds for the ELB backend latency monitor.
# NOTE(review): units look like milliseconds (1000/5000), but aws.elb.latency
# is reported in seconds by Datadog — confirm the intended scale with the
# monitor query before relying on these defaults.
variable "elb_backend_latency" {
  description = "Warning/critical thresholds for the ELB backend latency monitor"
  type        = "map"

  default = {
    warning  = 1000
    critical = 5000
  }
}
|
||||
@ -8,7 +8,7 @@ resource "datadog_monitor" "rds-mysql_cpu_80_15min" {
|
||||
count = "${var.dd_aws_rds == "enabled" ? 1 : 0 }"
|
||||
|
||||
|
||||
query = "avg(last_15m):avg:aws.rds.cpuutilization{dd_monitoring:enabled,dd_aws_rds:enabled,env:${var.env},!dd_custom_rds-mysql:enabled} by {identifier,region} > 90"
|
||||
query = "avg(last_15m):avg:aws.rds.cpuutilization{dd_monitoring:enabled,dd_aws_rds:enabled,env:${var.env}} by {name,region} > 90"
|
||||
type = "query alert"
|
||||
|
||||
thresholds {
|
||||
@ -33,7 +33,7 @@ resource "datadog_monitor" "mysql_rds_free_space_low" {
|
||||
message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}\n{{#is_warning}}\n${var.ho_escalation_group} \n{{/is_warning}} \n{{#is_warning_recovery}}\n${var.ho_escalation_group}\n{{/is_warning_recovery}}"
|
||||
|
||||
type = "query alert"
|
||||
query = "avg(last_15m): avg:aws.rds.free_storage_space{dd_monitoring:enabled,dd_aws_rds:enabled,env:${var.env},!dd_custom_rds-mysql:enabled} by {identifier,region} / avg:aws.rds.total_storage_space{dd_monitoring:enabled,dd_rds-mysql_basics:enabled,env:${var.env},!dd_custom_rds-mysql:enabled} by {identifier,region} * 100 < 10"
|
||||
query = "avg(last_15m): avg:aws.rds.free_storage_space{dd_monitoring:enabled,dd_aws_rds:enabled,env:${var.env}} by {name,region} / avg:aws.rds.total_storage_space{dd_monitoring:enabled,dd_rds-mysql_basics:enabled,env:${var.env},!dd_custom_rds-mysql:enabled} by {identifier,region} * 100 < 10"
|
||||
count = "${var.dd_aws_rds == "enabled" ? 1 : 0 }"
|
||||
|
||||
|
||||
|
||||
@ -3,7 +3,7 @@ resource "datadog_monitor" "ELB_no_healthy_instances" {
|
||||
message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"
|
||||
|
||||
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
|
||||
query = "avg(last_5m):avg:aws.elb.healthy_host_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {identifier,region} == 0"
|
||||
query = "avg(last_5m):avg:aws.elb.healthy_host_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} == 0"
|
||||
type = "query alert"
|
||||
|
||||
notify_no_data = "${var.elb_config["notify_no_data"]}"
|
||||
@ -23,7 +23,7 @@ resource "datadog_monitor" "ELB_unhealthy_instances" {
|
||||
message = "{{#is_alert}}\n${var.ho_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.ho_escalation_group}\n{{/is_recovery}}"
|
||||
|
||||
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
|
||||
query = "avg(last_5m):avg:aws.elb.healthy_host_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {identifier,region} == 0"
|
||||
query = "avg(last_5m):avg:aws.elb.un_healthy_host_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} > 0"
|
||||
type = "query alert"
|
||||
|
||||
notify_no_data = "${var.elb_config["notify_no_data"]}"
|
||||
@ -38,4 +38,79 @@ resource "datadog_monitor" "ELB_unhealthy_instances" {
|
||||
no_data_timeframe = 20
|
||||
}
|
||||
|
||||
# Alerts when backend 5xx responses exceed the configured percentage of total
# requests on an ELB over the last 5 minutes. Escalates to the
# hours-and-non-office (hno) group on alert and recovery.
resource "datadog_monitor" "ELB_too_much_5xx_backend" {
  # The query groups by {loadbalancername,region}, so the title must use the
  # {{loadbalancername.name}} template variable — {{host.identifier}} would
  # not resolve for this grouping.
  name    = "[${var.env}] ELB too much 5xx backend err on {{loadbalancername.name}}"
  message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"

  # Only instantiate the monitor when ELB monitoring is enabled for this stack.
  count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  # Ratio of backend 5xx count to total request count, as a percentage.
  query = "avg(last_5m): avg:aws.elb.httpcode_backend_5xx{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} / avg:aws.elb.request_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} * 100 > ${var.elb_5xx_threshold["critical"]}"
  type  = "query alert"

  thresholds {
    warning  = "${var.elb_5xx_threshold["warning"]}"
    critical = "${var.elb_5xx_threshold["critical"]}"
  }

  notify_no_data      = "${var.elb_config["notify_no_data"]}"
  evaluation_delay    = "${var.elb_config["delay"]}"
  renotify_interval   = 60
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = true
  new_host_delay      = "${var.elb_config["delay"]}"
  no_data_timeframe   = 20
}
|
||||
|
||||
# Alerts when backend 4xx responses exceed the configured percentage of total
# requests on an ELB over the last 5 minutes. Escalates to the
# hours-and-non-office (hno) group on alert and recovery.
resource "datadog_monitor" "ELB_too_much_4xx_backend" {
  # The query groups by {loadbalancername,region}, so the title must use the
  # {{loadbalancername.name}} template variable — {{host.identifier}} would
  # not resolve for this grouping.
  name    = "[${var.env}] ELB too much 4xx backend err on {{loadbalancername.name}}"
  message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"

  # Only instantiate the monitor when ELB monitoring is enabled for this stack.
  count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  # Ratio of backend 4xx count to total request count, as a percentage.
  query = "avg(last_5m): avg:aws.elb.httpcode_backend_4xx{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} / avg:aws.elb.request_count{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} * 100 > ${var.elb_4xx_threshold["critical"]}"
  type  = "query alert"

  thresholds {
    warning  = "${var.elb_4xx_threshold["warning"]}"
    critical = "${var.elb_4xx_threshold["critical"]}"
  }

  notify_no_data      = "${var.elb_config["notify_no_data"]}"
  evaluation_delay    = "${var.elb_config["delay"]}"
  renotify_interval   = 60
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = true
  new_host_delay      = "${var.elb_config["delay"]}"
  no_data_timeframe   = 20
}
|
||||
|
||||
# Alerts when average ELB backend latency crosses the configured threshold
# over the last 5 minutes. Escalates to the hours-office (ho) group on alert
# and recovery.
resource "datadog_monitor" "ELB_backend_latency" {
  # Fixes: "to high" -> "too high"; the query groups by
  # {loadbalancername,region}, so the title must use {{loadbalancername.name}}
  # — {{host.identifier}} would not resolve for this grouping.
  name    = "[${var.env}] ELB latency too high on {{loadbalancername.name}}"
  message = "{{#is_alert}}\n${var.ho_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.ho_escalation_group}\n{{/is_recovery}}"

  # Only instantiate the monitor when ELB monitoring is enabled for this stack.
  count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  # NOTE(review): aws.elb.latency is reported in seconds; thresholds of
  # 1000/5000 suggest milliseconds were intended — confirm the unit.
  query = "avg(last_5m):avg:aws.elb.latency{dd_monitoring:enabled,dd_aws_elb:enabled,env:${var.env}} by {loadbalancername,region} > ${var.elb_backend_latency["critical"]}"
  type  = "query alert"

  thresholds {
    warning  = "${var.elb_backend_latency["warning"]}"
    critical = "${var.elb_backend_latency["critical"]}"
  }

  notify_no_data      = "${var.elb_config["notify_no_data"]}"
  evaluation_delay    = "${var.elb_config["delay"]}"
  renotify_interval   = 60
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = true
  new_host_delay      = "${var.elb_config["delay"]}"
  no_data_timeframe   = 20
}
|
||||
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user