Explicitly using providers in a Terraform module

Problem Description

I am trying to explicitly pass providers to my module in order to create namespaces in both AzureCloud and AzureChinaCloud. However, I am running into problems while doing so. Below is my configuration:

terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "=2.78.0"
    }
  }
  backend "azurerm" {
    resource_group_name = "Terraform-rg"
    storage_account_name = "terraformstate"
    container_name = "tfstate"
    subscription_id = "00000000-0000-0000-0000-000000000000"
    key = "prod"
  }
}

provider "azurerm" {
  features {}
}


provider "azurerm" {
  features {}
  alias           = "sub2"
  subscription_id = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
  client_id       = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
  client_secret   = var.client_secret
  tenant_id       = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
  environment     = "china"
}

module "helm_ns_creation" {
  source = "./namespace/"
  providers = {
    azurerm = azurerm
    azurerm.sub2 = azurerm.sub2
  }
  applications = var.applications
  geo = var.geo
  ns_values = ["${file("../namespace/values.yaml")}"]
}
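
Note that for the providers map in the module block above to be accepted, the child module (./namespace/) typically has to declare the extra provider alias itself. A minimal sketch, assuming Terraform 0.15 or later (where configuration_aliases is available); the file name is only illustrative:

# ./namespace/providers.tf -- sketch of the alias declaration the child module would need
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "=2.78.0"
      # tells Terraform this module expects an aliased azurerm provider named "sub2"
      configuration_aliases = [azurerm.sub2]
    }
  }
}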

The code inside the module (./namespace/) is as follows:

provider "kubernetes" {
  config_path    = "config"
}

provider "helm" {
  kubernetes {
    config_path = "config"
  }
}

resource "kubernetes_namespace" "aks_namespace" {
  provider = azurerm.sub2
  for_each = {for ns in var.applications : ns.namespace_name => ns}
  metadata {
    annotations = {
      name = "${each.value.namespace_name}"
    }
    labels = {
      name = "${each.value.team_name}"
    }
    name = "${each.value.namespace_name}"
  }
}

locals {
  # get json
  namespace_data    = jsondecode(file(var.inputfile))
  principal_ids     = distinct([for principal in local.namespace_data.applications : principal.principal_id])
  principal_ids_cn  = distinct([for principal_cn in local.namespace_data.applications : principal_cn.principal_id_cn])
  get_principal_ids = (var.geo == "cn" ? local.principal_ids_cn : local.principal_ids)
}

data "azurerm_subscription" "global" {
}

resource "azurerm_role_assignment" "custom" {
  for_each = toset(local.get_principal_ids)
  scope = data.azurerm_subscription.global.id
# scope = "/subscriptions/{$var.subscription_id}"
  role_definition_name = var.custom_role
  principal_id = each.key
}

resource "azurerm_role_assignment" "builtin" {
  for_each = toset(local.get_principal_ids)
  scope = data.azurerm_subscription.global.id
  role_definition_name = var.builtin_role
  principal_id = each.key
}

data "azurerm_subscription" "china" {
  provider = azurerm.sub2
}

resource "azurerm_role_assignment" "custom_cn" {
  for_each = toset(local.get_principal_ids)
  scope = data.azurerm_subscription.china.id
# scope = "/subscriptions/{$var.subscription_id}"
  role_definition_name = var.custom_role
  principal_id = each.key
}

resource "azurerm_role_assignment" "builtin_cn" {
  for_each = toset(local.get_principal_ids)
  scope = data.azurerm_subscription.china.id
  role_definition_name = var.builtin_role
  principal_id = each.key
}

When I run the code to create the namespaces in the two different clouds (China and Global), I get the following error only for the China one, while the same configuration works for Global:

│ Error: Unable to list provider registration status, it is possible that this is due to invalid credentials or the service principal does not have permission to use the Resource Manager API, Azure error: resources.ProvidersClient#List: Failure responding to request: StatusCode=404 -- Original Error: autorest/azure: Service returned an error. Status=404 Code="SubscriptionNotFound" Message="The subscription 'xxxxxxx-xxxxxx-xxxx-xxxx-xxxxxxxxxx' could not be found."
│
│   with provider["registry.terraform.io/hashicorp/azurerm"],
│   on main.tf line 18, in provider "azurerm":
│   18: provider "azurerm" {

The subscription for the China provider is failing now. How can I make it work for both clouds (China and Global) at the same time? Please let me know if any additional details are required.

Tags: azure, terraform, terraform-provider-azure, azure-aks

Solution


For the explanation, I have divided the whole code into the 3 parts mentioned below:

  1. Using the following provider blocks, you must already have created AKS clusters in both the Public and the China cloud.

    terraform {
      required_providers {
        azurerm = {
          source  = "hashicorp/azurerm"
          version = "=2.78.0"
        }
      }
      backend "azurerm" {
        resource_group_name = "Terraform-rg"
        storage_account_name = "terraformstate"
        container_name = "tfstate"
        subscription_id = "00000000-0000-0000-0000-000000000000"
        key = "prod"
      }
    }
    provider "azurerm" {
      features {}
    }
    provider "azurerm" {
      features {}
      alias           = "sub2"
      subscription_id = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
      client_id       = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
      client_secret   = var.client_secret
      tenant_id       = "xxxxxxx-xxxxx-xxxx-xxxx-xxxxxxxxxxx"
      environment     = "china"
    }
    resource "azurerm_kubernetes_cluster" "aks_cluster_public" {
      provider = azurerm
      name                = "ansuman-aks-001"
      location            = data.azurerm_resource_group.sub1.location
      resource_group_name = data.azurerm_resource_group.sub1.name
      dns_prefix          = "ansuman-aks-cluster"
    
    
    .....
    }
    
    resource "azurerm_kubernetes_cluster" "aks_cluster_china" {
      provider = azurerm.sub2
      name                = "ansuman-aks-001"
      location            = data.azurerm_resource_group.sub2.location
      resource_group_name = data.azurerm_resource_group.sub2.name
      dns_prefix          = "ansuman-aks-cluster"
    
    
    .....
    }
    
  2. Once the AKS clusters are created, you can use the Kubernetes providers and create the Kubernetes namespaces in both the Public and the China cloud as shown below:

    provider "kubernetes" {
      host                   = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.host}"
      username               = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.username}"
      password               = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.password}"
      client_certificate     = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_certificate}")
      client_key             = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_key}")
      cluster_ca_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.cluster_ca_certificate}")
    }
    provider "kubernetes" {
      alias = "sub2"
      host                   = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.host}"
      username               = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.username}"
      password               = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.password}"
      client_certificate     = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_certificate}")
      client_key             = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_key}")
      cluster_ca_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.cluster_ca_certificate}")
    }
    
    resource "kubernetes_namespace" "app_namespace_public" {
      provider = kubernetes
      metadata {
        name = "my-namespace"
      }
      depends_on = [
        azurerm_kubernetes_cluster.aks_cluster_public
      ]
    }
    resource "kubernetes_namespace" "app_namespace_china" {
      provider = kubernetes.sub2
      metadata {
        name = "my-namespace"
      }
      depends_on = [
        azurerm_kubernetes_cluster.aks_cluster_china
      ]
    }
    

    As you can see in the Kubernetes providers, I have used the aks_cluster kube_configs for both Public and China because I am also creating the AKS clusters. If you are not creating the AKS clusters, you can use config paths instead (see the config-path sketch after this list), but the concept remains the same: one provider for Public and another for China, and the same applies to the resource blocks.

  3. After completing the above, you can use the azurerm providers for the role assignments as shown below:

    data "azurerm_subscription" "global" {
      provider = azurerm
    }
    
    resource "azurerm_role_assignment" "custom" {
      provider = azurerm
      for_each = toset(local.get_principal_ids)
      scope = data.azurerm_subscription.global.id
      role_definition_name = var.custom_role
      principal_id = each.key
    }
    
    resource "azurerm_role_assignment" "builtin" {
      provider = azurerm
      for_each = toset(local.get_principal_ids)
      scope = data.azurerm_subscription.global.id
      role_definition_name = var.builtin_role
      principal_id = each.key
    }
    
    data "azurerm_subscription" "china" {
      provider = azurerm.sub2
    }
    
    resource "azurerm_role_assignment" "custom_cn" {
      provider = azurerm.sub2
      for_each = toset(local.get_principal_ids)
      scope = data.azurerm_subscription.china.id
      role_definition_name = var.custom_role
      principal_id = each.key
    }
    
    resource "azurerm_role_assignment" "builtin_cn" {
      provider = azurerm.sub2
      for_each = toset(local.get_principal_ids)
      scope = data.azurerm_subscription.china.id
      role_definition_name = var.builtin_role
      principal_id = each.key
    }
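
As mentioned in part 2, if the AKS clusters already exist, the same two-provider pattern for Kubernetes can be expressed with config paths instead of the cluster attributes. A minimal sketch (the kubeconfig paths and resource names here are placeholders, not from the original configuration):

provider "kubernetes" {
  config_path = "~/.kube/config-public"   # kubeconfig of the public-cloud cluster (placeholder path)
}

provider "kubernetes" {
  alias       = "sub2"
  config_path = "~/.kube/config-china"    # kubeconfig of the China-cloud cluster (placeholder path)
}

resource "kubernetes_namespace" "ns_public" {
  provider = kubernetes
  metadata {
    name = "my-namespace"
  }
}

resource "kubernetes_namespace" "ns_china" {
  provider = kubernetes.sub2
  metadata {
    name = "my-namespace"
  }
}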
    

Note: If you are also using the Helm provider, then you have to follow the same concept as with the Kubernetes provider; you can refer to the Terraform Helm Provider documentation. Make sure to configure it the same way we configured the azurerm and kubernetes providers, and use it in the module or resource blocks in the same way.
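
For example, a minimal sketch of the Helm provider configured in the same pattern, one default configuration for the public cluster and one aliased configuration for the China cluster (the helm_release values below are purely illustrative):

provider "helm" {
  kubernetes {
    host                   = azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.host
    client_certificate     = base64decode(azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_certificate)
    client_key             = base64decode(azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_key)
    cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.cluster_ca_certificate)
  }
}

provider "helm" {
  alias = "sub2"
  kubernetes {
    host                   = azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.host
    client_certificate     = base64decode(azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_certificate)
    client_key             = base64decode(azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_key)
    cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.cluster_ca_certificate)
  }
}

# A release targeting the China cluster would then reference the aliased provider:
resource "helm_release" "example_cn" {
  provider   = helm.sub2
  name       = "example"                               # illustrative release name
  repository = "https://charts.bitnami.com/bitnami"    # illustrative chart repository
  chart      = "nginx"
}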


I tested the above with the following code in an environment that has an AKS cluster, namespaces, and a built-in role (no custom role), and got the output shown below:

My main.tf file:

provider "azurerm" {
  features {}
}
provider "azurerm" {
  alias = "sub2"
  subscription_id = "948d4068-xxxx-xxxx-xxxx-e00a844e059b"
  tenant_id = "72f988bf-xxxx-xxxx-xxxx-2d7cd011db47"
  client_id = "f6a2f33d-xxxx-xxxx-xxxx-d713a1bb37c0"
  client_secret = "inl7Q~Gvddxxxx-xxxx-xxxxaGPF3uSoL"
  features {}
}

data "azurerm_resource_group" "sub2" {
  provider = azurerm.sub2
  name = "ansumantest"
}
data "azurerm_resource_group" "sub1" {
  provider = azurerm
  name = "xxx-ansbal-xxxx"
}
resource "azurerm_kubernetes_cluster" "aks_cluster_public" {
  provider = azurerm
  name                = "ansuman-aks-001"
  location            = data.azurerm_resource_group.sub1.location
  resource_group_name = data.azurerm_resource_group.sub1.name
  dns_prefix          = "ansuman-aks-cluster"

  default_node_pool {
    name                  = "default"
    vm_size               = "Standard_D2_v2"
    availability_zones    = [1, 2]
    enable_auto_scaling   = true
    max_count             = 4
    min_count             = 1
    node_count            = 2
    type                  = "VirtualMachineScaleSets"
  }

  network_profile {
    network_plugin = "kubenet"
  }

  service_principal {
    client_id     = "f6a2f33d-xxxx-xxxx-xxxx-d713a1bb37c0"
    client_secret = "inl7Q~Gvxxxx-xxxx-xxxxiyaGPF3uSoL"
  }
  role_based_access_control {
    enabled = true
  }

}

resource "azurerm_kubernetes_cluster" "aks_cluster_china" {
  provider = azurerm.sub2
  name                = "ansuman-aks-001"
  location            = data.azurerm_resource_group.sub2.location
  resource_group_name = data.azurerm_resource_group.sub2.name
  dns_prefix          = "ansuman-aks-cluster"

  default_node_pool {
    name                  = "default"
    vm_size               = "Standard_D2_v2"
    availability_zones    = [1, 2]
    enable_auto_scaling   = true
    max_count             = 4
    min_count             = 1
    node_count            = 2
    type                  = "VirtualMachineScaleSets"
  }

  network_profile {
    network_plugin = "kubenet"
  }

  service_principal {
    client_id     = "f6a2f33d-xxxx-xxxx-xxxx-d713a1bb37c0"
    client_secret = "inl7Q~Gvddxxxx-xxxx-xxxx6ntiyaGPF3uSoL"
  }
  role_based_access_control {
    enabled = true
  }

}

provider "kubernetes" {
  host                   = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.host}"
  username               = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.username}"
  password               = "${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.password}"
  client_certificate     = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_certificate}")
  client_key             = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.client_key}")
  cluster_ca_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster_public.kube_config.0.cluster_ca_certificate}")
}
provider "kubernetes" {
  alias = "sub2"
  host                   = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.host}"
  username               = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.username}"
  password               = "${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.password}"
  client_certificate     = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_certificate}")
  client_key             = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.client_key}")
  cluster_ca_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster_china.kube_config.0.cluster_ca_certificate}")
}

resource "kubernetes_namespace" "app_namespace_public" {
  provider = kubernetes
  metadata {
    name = "my-namespace"
  }
  depends_on = [
    azurerm_kubernetes_cluster.aks_cluster_public
  ]
}
resource "kubernetes_namespace" "app_namespace_china" {
  provider = kubernetes.sub2
  metadata {
    name = "my-namespace"
  }
  depends_on = [
    azurerm_kubernetes_cluster.aks_cluster_china
  ]
}

data "azurerm_subscription" "global" {
  provider = azurerm
}
data "azurerm_client_config" "global" {
  provider = azurerm
}
resource "azurerm_role_assignment" "builtin" {
  provider = azurerm
  scope = data.azurerm_resource_group.sub1.id
  role_definition_name = "Azure Kubernetes Service Cluster Admin Role"
  principal_id = data.azurerm_client_config.global.object_id
}

data "azurerm_subscription" "china" {
  provider = azurerm.sub2
}
data "azurerm_client_config" "China" {
  provider = azurerm.sub2
}
resource "azurerm_role_assignment" "builtin_cn" {
  provider = azurerm.sub2
  scope = data.azurerm_subscription.china.id
  role_definition_name = "Azure Kubernetes Service Cluster Admin Role"
  principal_id = data.azurerm_client_config.China.object_id
}

Output:

[Output screenshots]

Note: I have used 2 subscriptions within the Public cloud only, as I don't have a China cloud subscription, but the same works across different clouds; just make sure to add the environment argument to the azurerm provider block for the China cloud.
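
For reference, a sketch of what the sub2 provider block from the test above would look like when pointed at a real China subscription (the IDs are placeholders), mirroring the block shown in part 1:

provider "azurerm" {
  alias           = "sub2"
  features {}
  environment     = "china"                      # switches the provider to the Azure China cloud endpoints
  subscription_id = "<china-subscription-id>"    # placeholder
  client_id       = "<service-principal-id>"     # placeholder
  client_secret   = var.client_secret
  tenant_id       = "<china-tenant-id>"          # placeholder
}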

