MON-80 Add subscription_id

This commit is contained in:
Alexandre Gaillet 2017-10-30 17:30:16 +01:00
parent effaaf0e12
commit 5136dd5c4d
2 changed files with 22 additions and 20 deletions

View File

@@ -4,6 +4,8 @@ variable "stack" {}
variable "client_name" {} variable "client_name" {}
variable "subscription_id" {}
variable "delay" { variable "delay" {
default = 600 default = 600
} }

View File

@@ -2,7 +2,7 @@ resource "datadog_monitor" "too_many_jobs_failed" {
name = "[${var.environment}] Too many jobs failed on {{name}} " name = "[${var.environment}] Too many jobs failed on {{name}} "
message = "${var.jobs_failed_message}" message = "${var.jobs_failed_message}"
query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.failed{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.jobs.failed{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.jobs.completed{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.jobs_failed_threshold_critical}" query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.failed{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.jobs.failed{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.jobs.completed{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.jobs_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -12,7 +12,7 @@ resource "datadog_monitor" "too_many_jobs_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -26,7 +26,7 @@ resource "datadog_monitor" "too_many_list_jobs_failed" {
name = "[${var.environment}] Too many list_jobs failure on {{name}} " name = "[${var.environment}] Too many list_jobs failure on {{name}} "
message = "${var.listjobs_failed_message}" message = "${var.listjobs_failed_message}"
query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.list_jobs.failure{*} by {resource_group,name}.as_count() / ( avg:azure.devices_iothubs.jobs.list_jobs.success{*} by {resource_group,name}.as_count() + avg:azure.devices_iothubs.jobs.list_jobs.failure{*} by {resource_group,name}.as_count() ) ) * 100 > ${var.listjobs_failed_threshold_critical}" query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.list_jobs.failure{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() / ( avg:azure.devices_iothubs.jobs.list_jobs.success{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() + avg:azure.devices_iothubs.jobs.list_jobs.failure{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() ) ) * 100 > ${var.listjobs_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -36,7 +36,7 @@ resource "datadog_monitor" "too_many_list_jobs_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -50,7 +50,7 @@ resource "datadog_monitor" "too_many_query_jobs_failed" {
name = "[${var.environment}] Too many query_jobs failed on {{name}} " name = "[${var.environment}] Too many query_jobs failed on {{name}} "
message = "${var.queryjobs_failed_message}" message = "${var.queryjobs_failed_message}"
query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.query_jobs.failure{*} by {resource_group,name}.as_count() / ( avg:azure.devices_iothubs.jobs.query_jobs.success{*} by {resource_group,name}.as_count() + avg:azure.devices_iothubs.jobs.query_jobs.failure{*} by {resource_group,name}.as_count() ) ) * 100 > ${var.queryjobs_failed_threshold_critical}" query = "sum(last_5m):( avg:azure.devices_iothubs.jobs.query_jobs.failure{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() / ( avg:azure.devices_iothubs.jobs.query_jobs.success{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() + avg:azure.devices_iothubs.jobs.query_jobs.failure{subscription_id:${var.subscription_id}} by {resource_group,name}.as_count() ) ) * 100 > ${var.queryjobs_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -60,7 +60,7 @@ resource "datadog_monitor" "too_many_query_jobs_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -74,12 +74,12 @@ resource "datadog_monitor" "status" {
name = "[${var.environment}] Status is not ok on {{name}} " name = "[${var.environment}] Status is not ok on {{name}} "
message = "${var.status_message}" message = "${var.status_message}"
query = "avg(last_5m):avg:azure.devices_iothubs.status{*} by {name,resource_group} < 1" query = "avg(last_5m):avg:azure.devices_iothubs.status{subscription_id:${var.subscription_id}} by {name,resource_group} < 1"
type = "query alert" type = "query alert"
notify_no_data = true notify_no_data = true
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -93,12 +93,12 @@ resource "datadog_monitor" "total_devices" {
name = "[${var.environment}] Total devices is wrong on {{name}} " name = "[${var.environment}] Total devices is wrong on {{name}} "
message = "${var.total_devices_message}" message = "${var.total_devices_message}"
query = "avg(last_5m):avg:azure.devices_iothubs.devices.total_devices{*} by {name,resource_group} == 0" query = "avg(last_5m):avg:azure.devices_iothubs.devices.total_devices{subscription_id:${var.subscription_id}} by {name,resource_group} == 0"
type = "query alert" type = "query alert"
notify_no_data = true notify_no_data = true
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -112,7 +112,7 @@ resource "datadog_monitor" "too_many_c2d_methods_failed" {
name = "[${var.environment}] Too many c2d methods failure on {{name}} " name = "[${var.environment}] Too many c2d methods failure on {{name}} "
message = "${var.c2d_methods_failed_message}" message = "${var.c2d_methods_failed_message}"
query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.methods.failure{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.methods.failure{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.methods.success{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_methods_failed_threshold_critical}" query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.methods.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.methods.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.methods.success{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_methods_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -122,7 +122,7 @@ resource "datadog_monitor" "too_many_c2d_methods_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -136,7 +136,7 @@ resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
name = "[${var.environment}] Too many c2d twin read failure on {{name}} " name = "[${var.environment}] Too many c2d twin read failure on {{name}} "
message = "${var.c2d_twin_read_failed_message}" message = "${var.c2d_twin_read_failed_message}"
query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.twin.read.failure{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.twin.read.failure{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.twin.read.success{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_twin_read_failed_threshold_critical}" query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.twin.read.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.twin.read.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.twin.read.success{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_twin_read_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -146,7 +146,7 @@ resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -160,7 +160,7 @@ resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
name = "[${var.environment}] Too many c2d twin update failure on {{name}} " name = "[${var.environment}] Too many c2d twin update failure on {{name}} "
message = "${var.c2d_twin_update_failed_message}" message = "${var.c2d_twin_update_failed_message}"
query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.twin.update.failure{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.twin.update.failure{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.twin.update.success{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_twin_update_failed_threshold_critical}" query = "avg(last_5m):( avg:azure.devices_iothubs.c2d.twin.update.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.c2d.twin.update.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.c2d.twin.update.success{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.c2d_twin_update_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -170,7 +170,7 @@ resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -184,7 +184,7 @@ resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
name = "[${var.environment}] Too many d2c twin read failure on {{name}} " name = "[${var.environment}] Too many d2c twin read failure on {{name}} "
message = "${var.d2c_twin_read_failed_message}" message = "${var.d2c_twin_read_failed_message}"
query = "avg(last_5m):( avg:azure.devices_iothubs.d2c.twin.read.failure{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.d2c.twin.read.failure{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.d2c.twin.read.success{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.d2c_twin_read_failed_threshold_critical}" query = "avg(last_5m):( avg:azure.devices_iothubs.d2c.twin.read.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.d2c.twin.read.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.d2c.twin.read.success{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.d2c_twin_read_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -194,7 +194,7 @@ resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true
@@ -208,7 +208,7 @@ resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
name = "[${var.environment}] Too many d2c twin update failure on {{name}} " name = "[${var.environment}] Too many d2c twin update failure on {{name}} "
message = "${var.d2c_twin_update_failed_message}" message = "${var.d2c_twin_update_failed_message}"
query = "avg(last_5m):( avg:azure.devices_iothubs.d2c.twin.update.failure{*} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.d2c.twin.update.failure{*} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.d2c.twin.update.success{*} by {name,resource_group}.as_count() ) ) * 100 > ${var.d2c_twin_update_failed_threshold_critical}" query = "avg(last_5m):( avg:azure.devices_iothubs.d2c.twin.update.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() / ( avg:azure.devices_iothubs.d2c.twin.update.failure{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() + avg:azure.devices_iothubs.d2c.twin.update.success{subscription_id:${var.subscription_id}} by {name,resource_group}.as_count() ) ) * 100 > ${var.d2c_twin_update_failed_threshold_critical}"
type = "query alert" type = "query alert"
thresholds { thresholds {
@@ -218,7 +218,7 @@ resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
notify_no_data = false notify_no_data = false
evaluation_delay = "${var.delay}" evaluation_delay = "${var.delay}"
renotify_interval = 60 renotify_interval = 0
notify_audit = false notify_audit = false
timeout_h = 0 timeout_h = 0
include_tags = true include_tags = true