Merged in MON-93-aws-elb-custom-tags (pull request #34)

MON-96 Update elb with new best practices

Approved-by: Quentin Manfroi <quentin.manfroi@yahoo.fr>
Approved-by: Alexandre Gaillet <alexandre.gaillet@fr.clara.net>
This commit is contained in:
Ahmed Fourti 2018-02-22 13:04:17 +00:00 committed by Quentin Manfroi
commit 1223e3b26f
4 changed files with 268 additions and 110 deletions

48
cloud/aws/elb/README.md Normal file
View File

@ -0,0 +1,48 @@
AWS ELB DataDog monitors
========================
How to use this module
----------------------
```
module "datadog-monitors-aws-elb" {
source = "git::ssh://git@bitbucket.org/morea/terraform.feature.datadog.git//cloud/aws/elb?ref={revision}"
environment = "${var.environment}"
message = "${module.datadog-message-alerting.alerting-message}"
}
```
Purpose
-------
Creates DataDog monitors with the following checks :
* ELB no healthy hosts
* ELB latency too high
* ELB http code 4xx percent too high
* ELB http code 5xx percent too high
* ELB backend http code 4xx percent too high
* ELB backend http code 5xx percent too high
Inputs
------
| Name | Description | Type | Default | Required |
|------|-------------|:----:|:-----:|:-----:|
| dd_aws_elb | Enable ELB monitors (set to `enabled` to create them) | string | `disable` | no |
| elb_4xx_threshold_critical | loadbalancer 4xx critical threshold in percentage | string | `10` | no |
| elb_4xx_threshold_warning | loadbalancer 4xx warning threshold in percentage | string | `5` | no |
| elb_5xx_threshold_critical | loadbalancer 5xx critical threshold in percentage | string | `10` | no |
| elb_5xx_threshold_warning | loadbalancer 5xx warning threshold in percentage | string | `5` | no |
| elb_backend_4xx_threshold_critical | loadbalancer backend 4xx critical threshold in percentage | string | `10` | no |
| elb_backend_4xx_threshold_warning | loadbalancer backend 4xx warning threshold in percentage | string | `5` | no |
| elb_backend_5xx_threshold_critical | loadbalancer backend 5xx critical threshold in percentage | string | `10` | no |
| elb_backend_5xx_threshold_warning | loadbalancer backend 5xx warning threshold in percentage | string | `5` | no |
| elb_backend_latency_critical | latency critical threshold in seconds | string | `5` | no |
| elb_backend_latency_warning | latency warning threshold in seconds | string | `1` | no |
| environment | Architecture Environment | string | - | yes |
| evaluation_delay | Delay in seconds for the metric evaluation | string | `600` | no |
| filter_tags_custom | Tags used for custom filtering when filter_tags_use_defaults is false | string | `*` | no |
| filter_tags_use_defaults | Use default filter tags convention | string | `true` | no |
| message | Message sent when an alert is triggered | string | - | yes |

View File

@ -1 +0,0 @@
../../../inputs.tf

80
cloud/aws/elb/inputs.tf Normal file
View File

@ -0,0 +1,80 @@
# Global Terraform

# Environment name, interpolated into every monitor title and the default
# filter tags (see monitors.tf).
variable "environment" {
description = "Architecture Environment"
type = "string"
}

# Global DataDog

# Applied to both `evaluation_delay` and `new_host_delay` on every monitor
# in this module, to give CloudWatch metrics time to arrive in Datadog.
variable "evaluation_delay" {
description = "Delay in seconds for the metric evaluation"
default = 600
}

# Notification body appended to every monitor (typically built by the
# shared datadog-message-alerting module — see README).
variable "message" {
description = "Message sent when an alert is triggered"
}

# When "true" (string, not bool — Terraform 0.11 style), monitors filter on
# `dd_monitoring:enabled,dd_aws_elb:enabled,env:<environment>`.
variable "filter_tags_use_defaults" {
description = "Use default filter tags convention"
default = "true"
}

# Raw tag filter used instead of the defaults when
# filter_tags_use_defaults is "false"; "*" matches everything.
variable "filter_tags_custom" {
description = "Tags used for custom filtering when filter_tags_use_defaults is false"
default = "*"
}
## ELB

variable "dd_aws_elb" {
  # Feature flag: every monitor in this module is created through a
  # `count = dd_aws_elb == "enabled" ? 1 : 0` ternary, so anything other
  # than exactly "enabled" disables the whole module.
  # A description was missing, which produced a garbage entry ("# ELB")
  # in the generated README input table.
  description = "Enable ELB monitors (set to \"enabled\" to create them)"
  default     = "disable"
}

# 4xx / 5xx thresholds are percentages of total requests over 5 minutes.

variable "elb_4xx_threshold_warning" {
  description = "loadbalancer 4xx warning threshold in percentage"
  default     = 5
}

variable "elb_4xx_threshold_critical" {
  description = "loadbalancer 4xx critical threshold in percentage"
  default     = 10
}

variable "elb_5xx_threshold_warning" {
  description = "loadbalancer 5xx warning threshold in percentage"
  default     = 5
}

variable "elb_5xx_threshold_critical" {
  description = "loadbalancer 5xx critical threshold in percentage"
  default     = 10
}

variable "elb_backend_4xx_threshold_warning" {
  description = "loadbalancer backend 4xx warning threshold in percentage"
  default     = 5
}

variable "elb_backend_4xx_threshold_critical" {
  description = "loadbalancer backend 4xx critical threshold in percentage"
  default     = 10
}

variable "elb_backend_5xx_threshold_warning" {
  description = "loadbalancer backend 5xx warning threshold in percentage"
  default     = 5
}

variable "elb_backend_5xx_threshold_critical" {
  description = "loadbalancer backend 5xx critical threshold in percentage"
  default     = 10
}

# Latency thresholds are in seconds (aws.elb.latency unit).

variable "elb_backend_latency_warning" {
  description = "latency warning threshold in seconds"
  default     = 1
}

variable "elb_backend_latency_critical" {
  description = "latency critical threshold in seconds"
  default     = 5
}

View File

@ -1,124 +1,192 @@
# Renders the tag filter shared by every monitor query: either the default
# convention (dd_monitoring/dd_aws_elb enablement tags plus env) or the
# caller-supplied custom filter.
data "template_file" "filter" {
  template = "$${filter}"

  vars {
    # NOTE: the custom branch previously used a redundant nested
    # interpolation ("${var.filter_tags_custom}") — a deprecated HCL idiom;
    # a bare variable reference is equivalent.
    filter = "${var.filter_tags_use_defaults == "true" ? format("dd_monitoring:enabled,dd_aws_elb:enabled,env:%s", var.environment) : var.filter_tags_custom}"
  }
}
# Alerts when an ELB has zero healthy backend instances.
# (This span contained interleaved deleted-diff residue from the pre-change
# resource; this is the reconstructed post-change resource.)
resource "datadog_monitor" "ELB_no_healthy_instances" {
  name    = "[${var.environment}] ELB no healthy instances"
  message = "${var.message}"
  count   = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type    = "metric alert"

  query = <<EOF
avg(last_5m): (
  avg:aws.elb.healthy_host_count{${data.template_file.filter.rendered}} by {region,loadbalancer}
) < 1
EOF

  # notify_no_data stays true here (unlike the percentage monitors):
  # no data on healthy_host_count is itself a problem worth paging on.
  notify_no_data      = true
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
# Alerts when the share of ELB-generated 4xx responses exceeds the threshold.
# (This span contained interleaved deleted-diff residue of the removed
# ELB_unhealthy_instances and old ELB_too_much_5xx_backend resources; this is
# the reconstructed post-change resource.)
resource "datadog_monitor" "ELB_too_much_4xx" {
  name    = "[${var.environment}] ELB 4xx errors too high {{comparator}} {{#is_alert}}{{threshold}}%{{/is_alert}}{{#is_warning}}{{warn_threshold}}%{{/is_warning}} ({{value}}%)"
  message = "${var.message}"
  count   = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type    = "metric alert"

  # FIX: removed a stray trailing double quote after the threshold
  # interpolation — inside a heredoc it would have been sent to Datadog
  # as part of the query and rejected.
  query = <<EOF
avg(last_5m): (
  avg:aws.elb.httpcode_elb_4xx{${data.template_file.filter.rendered}} by {region,loadbalancer} /
  avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {region,loadbalancer}
) * 100 > ${var.elb_4xx_threshold_critical}
EOF

  thresholds {
    warning  = "${var.elb_4xx_threshold_warning}"
    critical = "${var.elb_4xx_threshold_critical}"
  }

  notify_no_data      = false
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
# Alerts when the share of ELB-generated 5xx responses exceeds the threshold.
resource "datadog_monitor" "ELB_too_much_5xx" {
  name    = "[${var.environment}] ELB 5xx errors too high {{comparator}} {{#is_alert}}{{threshold}}%{{/is_alert}}{{#is_warning}}{{warn_threshold}}%{{/is_warning}} ({{value}}%)"
  message = "${var.message}"

  # FIX: this monitor was missing the dd_aws_elb gate every sibling
  # monitor has, so it would have been created even with the module disabled.
  count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type  = "metric alert"

  # FIX: removed a stray trailing double quote after the threshold
  # interpolation (it was inside the heredoc, corrupting the query).
  query = <<EOF
avg(last_5m): (
  avg:aws.elb.httpcode_elb_5xx{${data.template_file.filter.rendered}} by {region,loadbalancer} /
  avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {region,loadbalancer}
) * 100 > ${var.elb_5xx_threshold_critical}
EOF

  thresholds {
    warning  = "${var.elb_5xx_threshold_warning}"
    critical = "${var.elb_5xx_threshold_critical}"
  }

  notify_no_data      = false
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
# Alerts when the share of backend-generated 4xx responses exceeds the
# threshold. (This span contained interleaved deleted-diff residue from the
# pre-change resource; this is the reconstructed post-change resource.)
resource "datadog_monitor" "ELB_too_much_4xx_backend" {
  name    = "[${var.environment}] ELB backend 4xx errors too high {{comparator}} {{#is_alert}}{{threshold}}%{{/is_alert}}{{#is_warning}}{{warn_threshold}}%{{/is_warning}} ({{value}}%)"
  message = "${var.message}"
  count   = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type    = "metric alert"

  # FIX: removed a stray trailing double quote after the threshold
  # interpolation (it was inside the heredoc, corrupting the query).
  query = <<EOF
avg(last_5m): (
  avg:aws.elb.httpcode_backend_4xx{${data.template_file.filter.rendered}} by {region,loadbalancer} /
  avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {region,loadbalancer}
) * 100 > ${var.elb_backend_4xx_threshold_critical}
EOF

  thresholds {
    warning  = "${var.elb_backend_4xx_threshold_warning}"
    critical = "${var.elb_backend_4xx_threshold_critical}"
  }

  notify_no_data      = false
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
# Alerts when the share of backend-generated 5xx responses exceeds the threshold.
resource "datadog_monitor" "ELB_too_much_5xx_backend" {
  name    = "[${var.environment}] ELB backend 5xx errors too high {{comparator}} {{#is_alert}}{{threshold}}%{{/is_alert}}{{#is_warning}}{{warn_threshold}}%{{/is_warning}} ({{value}}%)"
  message = "${var.message}"

  # FIX: this monitor was missing the dd_aws_elb gate every sibling
  # monitor has, so it would have been created even with the module disabled.
  count = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type  = "metric alert"

  # FIX: removed a stray trailing double quote after the threshold
  # interpolation (it was inside the heredoc, corrupting the query).
  query = <<EOF
avg(last_5m): (
  avg:aws.elb.httpcode_backend_5xx{${data.template_file.filter.rendered}} by {region,loadbalancer} /
  avg:aws.elb.request_count{${data.template_file.filter.rendered}} by {region,loadbalancer}
) * 100 > ${var.elb_backend_5xx_threshold_critical}
EOF

  thresholds {
    warning  = "${var.elb_backend_5xx_threshold_warning}"
    critical = "${var.elb_backend_5xx_threshold_critical}"
  }

  notify_no_data      = false
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}
# Alerts when average backend latency stays above the threshold.
# (This span contained interleaved deleted-diff residue from the pre-change
# resource; this is the reconstructed post-change resource.)
resource "datadog_monitor" "ELB_backend_latency" {
  # FIX: thresholds here are seconds (aws.elb.latency), not percentages —
  # the title previously rendered them with a "%" unit.
  name    = "[${var.environment}] ELB latency too high {{comparator}} {{#is_alert}}{{threshold}}s{{/is_alert}}{{#is_warning}}{{warn_threshold}}s{{/is_warning}} ({{value}}s)"
  message = "${var.message}"
  count   = "${var.dd_aws_elb == "enabled" ? 1 : 0 }"
  type    = "metric alert"

  # FIX: the query tail was garbled ("...critical}}\"") — an extra closing
  # brace and a stray double quote inside the heredoc.
  query = <<EOF
min(last_5m): (
  avg:aws.elb.latency{${data.template_file.filter.rendered}} by {region,loadbalancer}
) > ${var.elb_backend_latency_critical}
EOF

  thresholds {
    warning  = "${var.elb_backend_latency_warning}"
    critical = "${var.elb_backend_latency_critical}"
  }

  notify_no_data      = false
  evaluation_delay    = "${var.evaluation_delay}"
  renotify_interval   = 0
  notify_audit        = false
  timeout_h           = 0
  include_tags        = true
  locked              = false
  require_full_window = false
  new_host_delay      = "${var.evaluation_delay}"
  no_data_timeframe   = 20

  tags = ["env:${var.environment}", "resource:elb", "team:aws", "provider:aws"]
}

View File

@ -66,43 +66,6 @@ variable "rds_mem_threshold" {
}
}
## ELB
variable "dd_aws_elb" {
default = "disable"
}
variable "elb_config" {
type = "map"
default = {
notify_no_data = false
delay = 900
}
}
variable "elb_5xx_threshold" {
default = {
warning = 5
critical = 10
}
}
variable "elb_4xx_threshold" {
default = {
warning = 5
critical = 10
}
}
variable "elb_backend_latency" {
description = "Average time elapsed after the request leaves the load balancer until a response is received. In seconds"
default = {
warning = 1
critical = 5
}
}
##apache nginx php
variable "dd_nginx" {
default = "disabled"