MON-96 - Update ELB with new best practices and follow recommendations

This commit is contained in:
Alexandre Gaillet 2018-02-19 19:25:07 +01:00
parent 6979212745
commit ce55b16b86
4 changed files with 187 additions and 95 deletions

43
cloud/aws/elb/README.md Normal file
View File

@ -0,0 +1,43 @@
AWS ELB DataDog monitors
========================
How to use this module
----------------------
```
module "datadog-monitors-aws-elb" {
source = "git::ssh://git@bitbucket.org/morea/terraform.feature.datadog.git//cloud/aws/elb?ref={revision}"
environment = "${var.environment}"
message = "${module.datadog-message-alerting.alerting-message}"
}
```
Purpose
-------
Creates DataDog monitors with the following checks :
* ELB no healthy hosts
* ELB latency too high
* ELB http code 5xx percent too high
* ELB http code 4xx percent too high
Inputs
------
| Name | Description | Type | Default | Required |
|------|-------------|:----:|:-----:|:-----:|
| dd_aws_elb | Set to enabled to create the ELB monitors | string | `disable` | no |
| elb_4xx_threshold_critical | loadbalancer 4xx critical threshold in percentage | string | `10` | no |
| elb_4xx_threshold_warning | loadbalancer 4xx warning threshold in percentage | string | `5` | no |
| elb_5xx_threshold_critical | loadbalancer 5xx critical threshold in percentage | string | `10` | no |
| elb_5xx_threshold_warning | loadbalancer 5xx warning threshold in percentage | string | `5` | no |
| elb_backend_latency_critical | latency critical threshold in seconds | string | `5` | no |
| elb_backend_latency_warning | latency warning threshold in seconds | string | `1` | no |
| elb_notify_no_data | Use this variable to disable notify no data | string | `true` | no |
| environment | Architecture Environment | string | - | yes |
| evaluation_delay | Delay in seconds for the metric evaluation | string | `600` | no |
| filter_tags_custom | Tags used for custom filtering when filter_tags_use_defaults is false | string | `*` | no |
| filter_tags_use_defaults | Use default filter tags convention | string | `true` | no |
| message | Message sent when an alert is triggered | string | - | yes |

View File

@ -1 +0,0 @@
../../../inputs.tf

65
cloud/aws/elb/inputs.tf Normal file
View File

@ -0,0 +1,65 @@
# Global Terraform
variable "environment" {
  description = "Architecture Environment"
  type        = "string"
}

# Global DataDog
variable "evaluation_delay" {
  description = "Delay in seconds for the metric evaluation"
  default     = 600
}

variable "message" {
  description = "Message sent when an alert is triggered"
}

variable "filter_tags_use_defaults" {
  description = "Use default filter tags convention"
  default     = "true"
}

variable "filter_tags_custom" {
  description = "Tags used for custom filtering when filter_tags_use_defaults is false"
  default     = "*"
}

## ELB
variable "dd_aws_elb" {
  # Monitors are created only when this equals "enabled" (used in each
  # resource's count); any other value — including this default — disables them.
  description = "Set to enabled to create the ELB monitors"
  default     = "disable"
}

variable "elb_notify_no_data" {
  description = "Use this variable to disable notify no data"
  default     = true
}

variable "elb_5xx_threshold_warning" {
  description = "loadbalancer 5xx warning threshold in percentage"
  default     = 5
}

variable "elb_5xx_threshold_critical" {
  description = "loadbalancer 5xx critical threshold in percentage"
  default     = 10
}

variable "elb_4xx_threshold_warning" {
  description = "loadbalancer 4xx warning threshold in percentage"
  default     = 5
}

variable "elb_4xx_threshold_critical" {
  description = "loadbalancer 4xx critical threshold in percentage"
  default     = 10
}

variable "elb_backend_latency_warning" {
  description = "latency warning threshold in seconds"
  default     = 1
}

variable "elb_backend_latency_critical" {
  description = "latency critical threshold in seconds"
  default     = 5
}

View File

@ -2,131 +2,153 @@ data "template_file" "filter" {
template = "$${filter}"
vars {
filter = "${var.filter_tags_use_defaults == "true" ? format("dd_monitoring:enabled,dd_aws_elb:enabled,env:%s", var.env) : "${var.filter_tags_custom}"}"
filter = "${var.filter_tags_use_defaults == "true" ? format("dd_monitoring:enabled,dd_aws_elb:enabled,env:%s", var.environment) : "${var.filter_tags_custom}"}"
}
}
resource "datadog_monitor" "ELB_no_healthy_instances" {
name = "[${var.env}] ELB no healthy instances on {{host.identifier}}"
message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"
name = "[${var.environment}] ELB no healthy instances on {{host.identifier}}"
message = "${var.message}"
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
query = "avg(last_5m):avg:aws.elb.healthy_host_count{${data.template_file.filter.rendered}} by {loadbalancername,region} == 0"
type = "query alert"
query = <<EOF
avg(last_5m): (
avg:aws.elb.healthy_host_count{${data.template_file.filter.rendered}} by {loadbalancername,region}
) == 0
EOF
notify_no_data = "${var.elb_config["notify_no_data"]}"
evaluation_delay = "${var.elb_config["delay"]}"
renotify_interval = 60
type = "metric alert"
notify_no_data = "${var.elb_notify_no_data}"
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
notify_audit = false
timeout_h = 0
include_tags = true
locked = false
require_full_window = true
new_host_delay = "${var.elb_config["delay"]}"
new_host_delay = "${var.evaluation_delay}"
no_data_timeframe = 20
tags = ["env:${var.env}", "resource:elb", "team:aws", "provider:aws"]
tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
resource "datadog_monitor" "ELB_unhealthy_instances" {
name = "[${var.env}] ELB some unhealthy instances on {{host.identifier}}"
message = "{{#is_alert}}\n${var.ho_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.ho_escalation_group}\n{{/is_recovery}}"
name = "[${var.environment}] ELB some unhealthy instances on {{host.identifier}}"
message = "${var.message}"
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
query = "avg(last_5m):avg:aws.elb.un_healthy_host_count{${data.template_file.filter.rendered}} by {loadbalancername,region} > 0"
type = "query alert"
query = <<EOF
avg(last_5m): (
avg:aws.elb.un_healthy_host_count{${data.template_file.filter.rendered}} by {loadbalancername,region}
) > 0
EOF
notify_no_data = "${var.elb_config["notify_no_data"]}"
evaluation_delay = "${var.elb_config["delay"]}"
renotify_interval = 60
type = "metric alert"
notify_no_data = "${var.elb_notify_no_data}"
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
notify_audit = false
timeout_h = 0
include_tags = true
locked = false
require_full_window = true
new_host_delay = "${var.elb_config["delay"]}"
new_host_delay = "${var.evaluation_delay}"
no_data_timeframe = 20
tags = ["env:${var.env}", "resource:elb", "team:aws", "provider:aws"]
tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
resource "datadog_monitor" "ELB_too_much_5xx_backend" {
name = "[${var.env}] ELB too much 5xx backend err on {{host.identifier}}"
message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"
name = "[${var.environment}] ELB too much 5xx backend err on {{host.identifier}}"
message = "${var.message}"
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
query = "avg(last_5m): avg:aws.elb.httpcode_backend_5xx{${data.template_file.filter.rendered}} by {loadbalancername,region} / avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {loadbalancername,region} * 100 > ${var.elb_5xx_threshold["critical"]}"
type = "query alert"
query = <<EOF
avg(last_5m): (
avg:aws.elb.httpcode_backend_5xx{${data.template_file.filter.rendered}} by {loadbalancername,region} /
avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {loadbalancername,region}
) * 100 > ${var.elb_5xx_threshold_critical}
EOF
type = "metric alert"
thresholds {
warning = "${var.elb_5xx_threshold["warning"]}"
critical = "${var.elb_5xx_threshold["critical"]}"
warning = "${var.elb_5xx_threshold_warning}"
critical = "${var.elb_5xx_threshold_critical}"
}
notify_no_data = "${var.elb_config["notify_no_data"]}"
evaluation_delay = "${var.elb_config["delay"]}"
renotify_interval = 60
notify_no_data = "${var.elb_notify_no_data}"
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
notify_audit = false
timeout_h = 0
include_tags = true
locked = false
require_full_window = true
new_host_delay = "${var.elb_config["delay"]}"
new_host_delay = "${var.evaluation_delay}"
no_data_timeframe = 20
tags = ["env:${var.env}", "resource:elb", "team:aws", "provider:aws"]
tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
resource "datadog_monitor" "ELB_too_much_4xx_backend" {
name = "[${var.env}] ELB too much 4xx backend err on {{host.identifier}}"
message = "{{#is_alert}}\n${var.hno_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.hno_escalation_group}\n{{/is_recovery}}"
name = "[${var.environment}] ELB too much 4xx backend err on {{host.identifier}}"
message = "${var.message}"
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
query = "avg(last_5m): avg:aws.elb.httpcode_backend_4xx{${data.template_file.filter.rendered}} by {loadbalancername,region} / avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {loadbalancername,region} * 100 > ${var.elb_4xx_threshold["critical"]}"
type = "query alert"
query = <<EOF
avg(last_5m): (
avg:aws.elb.httpcode_backend_4xx{${data.template_file.filter.rendered}} by {loadbalancername,region} /
avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {loadbalancername,region}
) * 100 > ${var.elb_4xx_threshold_critical}
EOF
type = "metric alert"
thresholds {
warning = "${var.elb_4xx_threshold["warning"]}"
critical = "${var.elb_4xx_threshold["critical"]}"
warning = "${var.elb_4xx_threshold_warning}"
critical = "${var.elb_4xx_threshold_critical}"
}
notify_no_data = "${var.elb_config["notify_no_data"]}"
evaluation_delay = "${var.elb_config["delay"]}"
renotify_interval = 60
notify_no_data = "${var.elb_notify_no_data}"
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
notify_audit = false
timeout_h = 0
include_tags = true
locked = false
require_full_window = true
new_host_delay = "${var.elb_config["delay"]}"
new_host_delay = "${var.evaluation_delay}"
no_data_timeframe = 20
tags = ["env:${var.env}", "resource:elb", "team:aws", "provider:aws"]
tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
resource "datadog_monitor" "ELB_backend_latency" {
name = "[${var.env}] ELB latency to high on {{host.identifier}}"
message = "{{#is_alert}}\n${var.ho_escalation_group} \n{{/is_alert}} \n{{#is_recovery}}\n${var.ho_escalation_group}\n{{/is_recovery}}"
name = "[${var.environment}] ELB latency too high on {{host.identifier}}"
message = "${var.message}"
count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
query = "avg(last_5m):avg:aws.elb.latency{${data.template_file.filter.rendered}} by {loadbalancername,region} > ${var.elb_backend_latency["critical"]}"
type = "query alert"
query = <<EOF
avg(last_5m): (
avg:aws.elb.latency{${data.template_file.filter.rendered}} by {loadbalancername,region}
) > ${var.elb_backend_latency_critical}
EOF
type = "metric alert"
thresholds {
warning = "${var.elb_backend_latency["warning"]}"
critical = "${var.elb_backend_latency["critical"]}"
warning = "${var.elb_backend_latency_warning}"
critical = "${var.elb_backend_latency_critical}"
}
notify_no_data = "${var.elb_config["notify_no_data"]}"
evaluation_delay = "${var.elb_config["delay"]}"
renotify_interval = 60
notify_no_data = "${var.elb_notify_no_data}"
evaluation_delay = "${var.evaluation_delay}"
renotify_interval = 0
notify_audit = false
timeout_h = 0
include_tags = true
locked = false
require_full_window = true
new_host_delay = "${var.elb_config["delay"]}"
new_host_delay = "${var.evaluation_delay}"
no_data_timeframe = 20
tags = ["env:${var.env}", "resource:elb", "team:aws", "provider:aws"]
tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}

View File

@ -77,43 +77,6 @@ variable "rds_mem_threshold" {
}
}
## ELB
variable "dd_aws_elb" {
default = "disable"
}
variable "elb_config" {
type = "map"
default = {
notify_no_data = false
delay = 900
}
}
variable "elb_5xx_threshold" {
default = {
warning = 5
critical = 10
}
}
variable "elb_4xx_threshold" {
default = {
warning = 5
critical = 10
}
}
variable "elb_backend_latency" {
description = "Average time elapsed after the request leaves the load balancer until a response is received. In seconds"
default = {
warning = 1
critical = 5
}
}
##apache nginx php
variable "dd_nginx" {
default = "disabled"