MON-387 Fix IoTHub nodata due to a merge issue

This commit is contained in:
Laurent Piroelle 2019-01-24 10:38:19 +01:00
parent 601339b8ba
commit a2b5908498

View File

@@ -4,11 +4,12 @@ resource "datadog_monitor" "too_many_jobs_failed" {
message = "${coalesce(var.failed_jobs_rate_message, var.message)}"
query = <<EOF
${var.failed_jobs_rate_time_aggregator}(${var.failed_jobs_rate_timeframe}):(
${var.failed_jobs_rate_time_aggregator}(${var.failed_jobs_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.jobs.failed${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.jobs.failed${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.jobs.completed${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_jobs_rate_threshold_critical}
* 100 , 0) > ${var.failed_jobs_rate_threshold_critical}
EOF
type = "metric alert"
@@ -39,11 +40,12 @@ resource "datadog_monitor" "too_many_list_jobs_failed" {
message = "${coalesce(var.failed_listjobs_rate_message, var.message)}"
query = <<EOF
${var.failed_listjobs_rate_time_aggregator}(${var.failed_listjobs_rate_timeframe}):(
${var.failed_listjobs_rate_time_aggregator}(${var.failed_listjobs_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.jobs.list_jobs.failure${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.jobs.list_jobs.success${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.jobs.list_jobs.failure${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) )
) * 100 > ${var.failed_listjobs_rate_threshold_critical}
* 100, 0) > ${var.failed_listjobs_rate_threshold_critical}
EOF
type = "metric alert"
@@ -74,11 +76,12 @@ resource "datadog_monitor" "too_many_query_jobs_failed" {
message = "${coalesce(var.failed_queryjobs_rate_message, var.message)}"
query = <<EOF
${var.failed_queryjobs_rate_time_aggregator}(${var.failed_queryjobs_rate_timeframe}):(
${var.failed_queryjobs_rate_time_aggregator}(${var.failed_queryjobs_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.jobs.query_jobs.failure${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.jobs.query_jobs.success${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.jobs.query_jobs.failure${module.filter-tags.query_alert} by {resource_group,name}.as_rate(), 0) )
) * 100 > ${var.failed_queryjobs_rate_threshold_critical}
* 100, 0) > ${var.failed_queryjobs_rate_threshold_critical}
EOF
type = "metric alert"
@@ -165,11 +168,12 @@ resource "datadog_monitor" "too_many_c2d_methods_failed" {
message = "${coalesce(var.failed_c2d_methods_rate_message, var.message)}"
query = <<EOF
${var.failed_c2d_methods_rate_time_aggregator}(${var.failed_c2d_methods_rate_timeframe}):(
${var.failed_c2d_methods_rate_time_aggregator}(${var.failed_c2d_methods_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.c2d.methods.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.c2d.methods.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.c2d.methods.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_c2d_methods_rate_threshold_critical}
* 100, 0) > ${var.failed_c2d_methods_rate_threshold_critical}
EOF
type = "metric alert"
@@ -200,11 +204,12 @@ resource "datadog_monitor" "too_many_c2d_twin_read_failed" {
message = "${coalesce(var.failed_c2d_twin_read_rate_message, var.message)}"
query = <<EOF
${var.failed_c2d_twin_read_rate_time_aggregator}(${var.failed_c2d_twin_read_rate_timeframe}):(
${var.failed_c2d_twin_read_rate_time_aggregator}(${var.failed_c2d_twin_read_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.c2d.twin.read.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.c2d.twin.read.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.c2d.twin.read.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_c2d_twin_read_rate_threshold_critical}
* 100, 0) > ${var.failed_c2d_twin_read_rate_threshold_critical}
EOF
type = "metric alert"
@@ -235,11 +240,12 @@ resource "datadog_monitor" "too_many_c2d_twin_update_failed" {
message = "${coalesce(var.failed_c2d_twin_update_rate_message, var.message)}"
query = <<EOF
${var.failed_c2d_twin_update_rate_time_aggregator}(${var.failed_c2d_twin_update_rate_timeframe}):(
${var.failed_c2d_twin_update_rate_time_aggregator}(${var.failed_c2d_twin_update_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.c2d.twin.update.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.c2d.twin.update.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.c2d.twin.update.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_c2d_twin_update_rate_threshold_critical}
* 100, 0) > ${var.failed_c2d_twin_update_rate_threshold_critical}
EOF
type = "metric alert"
@@ -270,11 +276,12 @@ resource "datadog_monitor" "too_many_d2c_twin_read_failed" {
message = "${coalesce(var.failed_d2c_twin_read_rate_message, var.message)}"
query = <<EOF
${var.failed_d2c_twin_read_rate_time_aggregator}(${var.failed_d2c_twin_read_rate_timeframe}):(
${var.failed_d2c_twin_read_rate_time_aggregator}(${var.failed_d2c_twin_read_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.d2c.twin.read.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.d2c.twin.read.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.twin.read.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_d2c_twin_read_rate_threshold_critical}
* 100, 0) > ${var.failed_d2c_twin_read_rate_threshold_critical}
EOF
type = "metric alert"
@@ -305,11 +312,12 @@ resource "datadog_monitor" "too_many_d2c_twin_update_failed" {
message = "${coalesce(var.failed_d2c_twin_update_rate_message, var.message)}"
query = <<EOF
${var.failed_d2c_twin_update_rate_time_aggregator}(${var.failed_d2c_twin_update_rate_timeframe}):(
${var.failed_d2c_twin_update_rate_time_aggregator}(${var.failed_d2c_twin_update_rate_timeframe}):
default(
default(avg:azure.devices_iothubs.d2c.twin.update.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.d2c.twin.update.failure${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.twin.update.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.failed_d2c_twin_update_rate_threshold_critical}
* 100, 0) > ${var.failed_d2c_twin_update_rate_threshold_critical}
EOF
type = "metric alert"
@@ -340,13 +348,14 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_dropped" {
message = "${coalesce(var.dropped_d2c_telemetry_egress_message, var.message)}"
query = <<EOF
${var.dropped_d2c_telemetry_egress_time_aggregator}(${var.dropped_d2c_telemetry_egress_timeframe}): (
${var.dropped_d2c_telemetry_egress_time_aggregator}(${var.dropped_d2c_telemetry_egress_timeframe}):
default(
default(avg:azure.devices_iothubs.d2c.telemetry.egress.dropped${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.d2c.telemetry.egress.dropped${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.orphaned${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.invalid${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.dropped_d2c_telemetry_egress_rate_threshold_critical}
* 100, 0) > ${var.dropped_d2c_telemetry_egress_rate_threshold_critical}
EOF
type = "metric alert"
@@ -377,13 +386,14 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_orphaned" {
message = "${coalesce(var.orphaned_d2c_telemetry_egress_message, var.message)}"
query = <<EOF
${var.orphaned_d2c_telemetry_egress_time_aggregator}(${var.orphaned_d2c_telemetry_egress_timeframe}): (
${var.orphaned_d2c_telemetry_egress_time_aggregator}(${var.orphaned_d2c_telemetry_egress_timeframe}):
default(
default(avg:azure.devices_iothubs.d2c.telemetry.egress.orphaned${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.d2c.telemetry.egress.dropped${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.orphaned${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.invalid${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.orphaned_d2c_telemetry_egress_rate_threshold_critical}
* 100, 0) > ${var.orphaned_d2c_telemetry_egress_rate_threshold_critical}
EOF
type = "metric alert"
@@ -414,13 +424,14 @@ resource "datadog_monitor" "too_many_d2c_telemetry_egress_invalid" {
message = "${coalesce(var.invalid_d2c_telemetry_egress_message, var.message)}"
query = <<EOF
${var.invalid_d2c_telemetry_egress_time_aggregator}(${var.invalid_d2c_telemetry_egress_timeframe}): (
${var.invalid_d2c_telemetry_egress_time_aggregator}(${var.invalid_d2c_telemetry_egress_timeframe}):
default(
default(avg:azure.devices_iothubs.d2c.telemetry.egress.invalid${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) / (
default(avg:azure.devices_iothubs.d2c.telemetry.egress.dropped${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.orphaned${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.invalid${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) +
default(avg:azure.devices_iothubs.d2c.telemetry.egress.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_rate(), 0) )
) * 100 > ${var.invalid_d2c_telemetry_egress_rate_threshold_critical}
* 100, 0) > ${var.invalid_d2c_telemetry_egress_rate_threshold_critical}
EOF
type = "metric alert"
@@ -451,10 +462,11 @@ resource "datadog_monitor" "too_many_d2c_telemetry_ingress_nosent" {
message = "${coalesce(var.too_many_d2c_telemetry_ingress_nosent_message, var.message)}"
query = <<EOF
sum(${var.too_many_d2c_telemetry_ingress_nosent_timeframe}): (
sum(${var.too_many_d2c_telemetry_ingress_nosent_timeframe}):
default(
avg:azure.devices_iothubs.d2c.telemetry.ingress.all_protocol${module.filter-tags.query_alert} by {resource_group,region,name}.as_count() -
avg:azure.devices_iothubs.d2c.telemetry.ingress.success${module.filter-tags.query_alert} by {resource_group,region,name}.as_count()
) > 0
, 0) > 0
EOF
type = "metric alert"