cpu:
  api_field: cluster.capacityConfig.vcpuCount
  arg_name: cpu
  processor: googlecloudsdk.command_lib.managed_kafka.util:ValidateCPU
  help_text: |
    The number of vCPUs to provision for the cluster. The minimum is 3.

memory:
  api_field: cluster.capacityConfig.memoryBytes
  arg_name: memory
  type: googlecloudsdk.core.util.scaled_integer:ParseInteger
  help_text: |
    The memory to provision for the cluster in bytes. The value must be between 1 GiB and 8 GiB
    per vCPU. For example: 1024Mi, 4Gi.

subnets:
  arg_name: subnets
  type: "googlecloudsdk.calliope.arg_parsers:ArgList:"
  help_text: |
    A comma-separated list of VPC subnets from which the cluster is accessible. Both broker and
    bootstrap server IP addresses and DNS entries are automatically created in each subnet. Only
    one subnet per network is allowed, and each subnet must be located in the same region as the
    cluster. The subnet's project may differ from the cluster's project. Between 1 and 10 subnets
    can be specified. Each subnet must be in the format
    projects/``PROJECT_ID''/regions/``REGION''/subnetworks/``SUBNET''.
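
# Illustrative usage of the capacity and networking flags above (hypothetical
# project, cluster, and subnet names; verify the exact command path against the
# installed gcloud release):
#   gcloud managed-kafka clusters create my-cluster --location=us-central1 \
#     --cpu=3 --memory=3Gi \
#     --subnets=projects/my-project/regions/us-central1/subnetworks/my-subnet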

labels:
  api_field: cluster.labels
  arg_name: labels
  metavar: KEY=VALUE
  type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
  help_text: |
    List of label KEY=VALUE pairs to add.
    Keys must start with a lowercase character and contain only hyphens (`-`),
    underscores (```_```), lowercase characters, and numbers. Values must contain only
    hyphens (`-`), underscores (```_```), lowercase characters, and numbers.
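
# Illustrative value satisfying the key/value character rules above
# (hypothetical keys and values):
#   --labels=environment=prod,team=data-platform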

encryption-key:
  api_field: cluster.gcpConfig.kmsKey
  arg_name: encryption-key
  help_text: |
    The relative resource path of the Cloud KMS key to use for encryption in the form:
    projects/``PROJECT_ID''/locations/``LOCATION''/keyRings/``KEY_RING''/cryptoKeys/``KEY''.
    The key must be located in the same region as the cluster. The key cannot be changed once set.
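
# Illustrative key path (hypothetical project, key ring, and key names; the
# LOCATION segment must match the cluster's region):
#   --encryption-key=projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key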

auto-rebalance:
  api_field: cluster.rebalanceConfig.mode
  arg_name: auto-rebalance
  action: store_true
  help_text: |
    Whether the automatic rebalancing is enabled. If automatic rebalancing
    is enabled, topic partitions are rebalanced among brokers when the number of
    CPUs in the cluster changes. Automatic rebalancing is enabled by default.
    Use --no-auto-rebalance to disable this flag.
  choices:
  - arg_value: true
    enum_value: AUTO_REBALANCE_ON_SCALE_UP
  - arg_value: false
    enum_value: NO_REBALANCE
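
# Illustrative usage: disabling automatic rebalancing on an existing cluster
# (hypothetical cluster name and location):
#   gcloud managed-kafka clusters update my-cluster --location=us-central1 \
#     --no-auto-rebalance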

mtls-ca-pools:
  arg_name: mtls-ca-pools
  type: "googlecloudsdk.calliope.arg_parsers:ArgList:"
  help_text: |
    A comma-separated list of CA pools from the Google Cloud Certificate Authority Service.
    The root certificates of these CA pools will be installed in the truststore of each broker
    in the cluster for use with mTLS. A maximum of 10 CA pools can be specified.
    CA pools can be in a different project and region than the cluster.
    This command overwrites the entire set of pools currently configured on the cluster.
    If you want to add a new pool to an existing configuration, you must provide the full list of
    both the old and new CA pools in the command.
    Each CA pool must be in the format
    projects/``PROJECT_ID''/locations/``LOCATION''/caPools/``CA_POOL''. Clear the CA pools
    using the `--clear-mtls-ca-pools` flag.

clear-mtls-ca-pools:
  arg_name: clear-mtls-ca-pools
  action: store_true
  help_text: |
    Remove all the CA pools from the cluster. This will remove all root certificates
    from the truststore of each broker in the cluster.
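
# Illustrative usage (hypothetical resource names): --mtls-ca-pools always
# replaces the whole set, so list every pool you want to keep; clearing uses
# the companion flag:
#   gcloud managed-kafka clusters update my-cluster --location=us-central1 \
#     --mtls-ca-pools=projects/my-project/locations/us-central1/caPools/pool-1,projects/my-project/locations/us-central1/caPools/pool-2
#   gcloud managed-kafka clusters update my-cluster --location=us-central1 \
#     --clear-mtls-ca-pools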

allow-broker-downscale-on-cluster-upscale:
  api_field: cluster.updateOptions.allowBrokerDownscaleOnClusterUpscale
  arg_name: allow-broker-downscale-on-cluster-upscale
  action: store_true
  default: null
  help_text: |
    If enabled, this setting allows an update operation that could significantly decrease the
    per-broker vCPU and/or memory allocation, which can lead to reduced performance and
    availability. By default, an update operation will fail if it results in a reduction of 10% or
    more to the brokers' vCPU or memory allocation.

ssl-principal-mapping-rules:
  api_field: cluster.tlsConfig.sslPrincipalMappingRules
  arg_name: ssl-principal-mapping-rules
  type: str
  help_text: |
    The rules for mapping mTLS certificate Distinguished Names (DNs) to shortened principal names
    for Kafka ACLs. This flag corresponds exactly to the `ssl.principal.mapping.rules` broker config
    and matches the format and syntax defined in the Apache Kafka documentation.
    Setting or modifying this field will trigger a rolling restart of the Kafka brokers
    to apply the change. An empty string means that the default Kafka behavior is used.
    Example: "RULE:^CN=(.*?),OU=ServiceUsers.*$/$1@example.com/,DEFAULT"

# Trying to define this as a resource causes test failures in yaml_command_schema_test, so we
# define it as a flag instead.
kafka-cluster:
  arg_name: kafka-cluster
  help_text: |
    The resource path of the Kafka cluster to connect to, or the name of the Kafka cluster to
    connect to if the cluster is in the same project as the Connect cluster.

connect-cpu:
  api_field: connectCluster.capacityConfig.vcpuCount
  arg_name: cpu
  processor: googlecloudsdk.command_lib.managed_kafka.util:ValidateCPU
  help_text: |
    The number of vCPUs to provision for the Connect cluster. The minimum is 3.

connect-memory:
  api_field: connectCluster.capacityConfig.memoryBytes
  arg_name: memory
  type: googlecloudsdk.core.util.scaled_integer:ParseInteger
  help_text: |
    The memory to provision for the Connect cluster in bytes. The value must be between 1 GiB and
    8 GiB per vCPU. For example: 1024Mi, 4Gi.

connect-labels:
  api_field: connectCluster.labels
  arg_name: labels
  metavar: KEY=VALUE
  type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
  help_text: |
    List of label KEY=VALUE pairs to add.
    Keys must start with a lowercase character and contain only hyphens (`-`),
    underscores (```_```), lowercase characters, and numbers. Values must contain only
    hyphens (`-`), underscores (```_```), lowercase characters, and numbers.

clear-connect-labels:
  arg_name: clear-labels
  action: store_true
  help_text: |
    Remove all the labels from the connect cluster.

connect-encryption-key:
  api_field: connectCluster.gcpConfig.kmsKey
  arg_name: encryption-key
  help_text: |
    The relative resource path of the Cloud KMS key to use for encryption in the form:
    projects/``PROJECT_ID''/locations/``LOCATION''/keyRings/``KEY_RING''/cryptoKeys/``KEY''.
    The key must be located in the same region as the cluster. The key cannot be changed once set.

dns-name:
  arg_name: dns-name
  api_field: connectCluster.gcpConfig.accessConfig.networkConfigs.dnsDomainNames
  repeated: true
  action: append
  help_text: |
    DNS domain name from the subnet's network to be made visible to the Connect cluster.

secret:
  arg_name: secret
  api_field: connectCluster.gcpConfig.secretPaths
  repeated: true
  action: append
  help_text: |
    Secrets to load into workers. Exact SecretVersions from Secret Manager must
    be provided; aliases are not supported. Up to 32 secrets may be loaded
    into one cluster.
    Format: projects/<project-id>/secrets/<secret-name>/versions/<version-id>
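
# Illustrative secret version path (hypothetical project, secret, and version;
# a pinned version number is required because aliases are not supported):
#   --secret=projects/my-project/secrets/db-password/versions/1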

connect-primary-subnet:
  arg_name: primary-subnet
  api_field: connectCluster.gcpConfig.accessConfig.networkConfigs.primarySubnet
  help_text: |
    VPC subnet to make available to the Kafka Connect cluster. Structured
    like: projects/{project}/regions/{region}/subnetworks/{subnet_id}.
    The primary subnet is used to create a Private Service Connect (PSC) interface for the Kafka Connect workers.
    It must be located in the same region as the Connect cluster.

connect-additional-subnet:
  arg_name: additional-subnet
  api_field: connectCluster.gcpConfig.accessConfig.networkConfigs.additionalSubnets
  repeated: true
  action: append
  help_text: |
    Additional subnet to make available to the Kafka Connect cluster. Structured
    like: projects/{project}/regions/{region}/subnetworks/{subnet_id}.
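
# Illustrative usage of the Connect networking flags above (assumed command
# path and hypothetical resource names; verify against the installed gcloud
# release):
#   gcloud managed-kafka connect-clusters create my-connect-cluster --location=us-central1 \
#     --primary-subnet=projects/my-project/regions/us-central1/subnetworks/subnet-a \
#     --additional-subnet=projects/my-project/regions/us-central1/subnetworks/subnet-b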

partitions:
  api_field: topic.partitionCount
  arg_name: partitions
  help_text: |
    The number of partitions in a topic. You can increase the partition count for a topic, but you
    cannot decrease it. Increasing partitions for a topic that uses a key might change how messages
    are distributed.

replication-factor:
  api_field: topic.replicationFactor
  arg_name: replication-factor
  help_text: |
    The number of replicas of each partition. A replication factor of 3 is
    recommended for high availability.

configs:
  api_field: topic.configs
  arg_name: configs
  metavar: KEY=VALUE
  type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
  help_text: |
    Configurations for the topic that override the cluster defaults.
    The key of the map is a Kafka topic property name, for example:
    `cleanup.policy=compact`,`compression.type=producer`. If you provide a map with a key that
    already exists, only that configuration is updated. If the map contains a key that does not
    exist, the entry is appended to the topic configuration.
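
# Illustrative usage of the topic flags above (hypothetical names; the config
# values mirror the examples in the help text):
#   gcloud managed-kafka topics create my-topic --cluster=my-cluster --location=us-central1 \
#     --partitions=6 --replication-factor=3 \
#     --configs=cleanup.policy=compact,compression.type=producer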

connectCluster-configs:
  arg_name: configs
  api_field: connectCluster.config
  metavar: KEY=VALUE
  type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
  help_text: |
    Configurations for the Connect cluster that override the cluster defaults.
    The key of the map is a Kafka Connect worker property name, for example:
    `offset.flush.interval.ms=10000`.

connectCluster-config-file:
  arg_name: config-file
  metavar: JSON|YAML|FILE
  type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
  help_text: |
    The path to a JSON or YAML file containing the configurations that override the Connect
    cluster defaults. The flag also accepts inline JSON or YAML.
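
# Illustrative inline value for --config-file (a standard Kafka Connect worker
# property is shown; whether a given property may be overridden is
# service-dependent):
#   --config-file='{"offset.flush.interval.ms": "10000"}'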

connector-configs:
  arg_name: configs
  api_field: connector.configs
  metavar: KEY=VALUE
  type: "googlecloudsdk.calliope.arg_parsers:ArgDict:"
  help_text: |
    Configurations for the connector that override the connector defaults.
    The key of the map is a Kafka connector property name, for example:
    `tasks.max=3`.

connector-config-file:
  arg_name: config-file
  metavar: JSON|YAML|FILE
  type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
  help_text: |
    The path to a JSON or YAML file containing the configurations that override the connector
    defaults. The flag also accepts inline JSON or YAML.
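
# Illustrative usage for connector configuration (assumed command path,
# hypothetical names, and a standard Kafka Connect property):
#   gcloud managed-kafka connectors create my-connector --connect-cluster=my-connect-cluster \
#     --location=us-central1 --configs=tasks.max=3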

task-restart-min-backoff:
  api_field: connector.taskRestartPolicy.minimumBackoff
  arg_name: task-restart-min-backoff
  type: googlecloudsdk.core.util.times:ParseDuration
  processor: googlecloudsdk.core.util.times:FormatDuration
  help_text: |
    The minimum amount of time to wait before retrying a failed task, in seconds by default.
    This sets a lower bound for the backoff delay. The default value is 60s.
    See $ gcloud topic datetimes for information on duration formats.

task-restart-max-backoff:
  api_field: connector.taskRestartPolicy.maximumBackoff
  arg_name: task-restart-max-backoff
  type: googlecloudsdk.core.util.times:ParseDuration
  processor: googlecloudsdk.core.util.times:FormatDuration
  help_text: |
    The maximum amount of time to wait before retrying a failed task, in seconds by default.
    This sets an upper bound for the backoff delay. The default value is 1800s (30 minutes).
    See $ gcloud topic datetimes for information on duration formats.
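
# Illustrative backoff settings (hypothetical values; both flags accept the
# duration formats described in `gcloud topic datetimes`, e.g. 30s or 15m):
#   --task-restart-min-backoff=30s --task-restart-max-backoff=15m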

clear-secrets:
  arg_name: clear-secrets
  action: store_true
  help_text: |
    Remove all the secrets from the connect cluster.

clear-dns-names:
  arg_name: clear-dns-names
  action: store_true
  help_text: |
    Remove all the DNS domain names for the connect cluster.

clear-configs:
  arg_name: clear-configs
  action: store_true
  help_text: |
    Remove all the configurations for the topic.

# TODO(b/336117815): Provide hard examples and external docs on this flag.
topics-file:
  arg_name: topics-file
  type: "googlecloudsdk.calliope.arg_parsers:ArgObject:"
  metavar: JSON|YAML|FILE
  help_text: |
    The path to a JSON or YAML file containing the configuration of the topics to update for
    the consumer group. The flag also accepts inline JSON or YAML.

acl-entry:
  arg_name: acl-entry
  api_field: acl.aclEntries
  help_text: |
    An acl entry that configures access for a principal, for a specific operation on the acl's
    resource pattern. This flag can be repeated.

    ``PRINCIPAL'' is the principal. Specified as Google Cloud account, with the Kafka StandardAuthorizer prefix
    "User:". For example: "User:admin@project.iam.gserviceaccount.com".
    Can be the wildcard "User:```*```" to refer to all users.

    ``OPERATION'' is the operation type. Allowed values are: ALL, READ, WRITE,
    CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS,
    IDEMPOTENT_WRITE.

    ``PERMISSION-TYPE'' is the permission type. Allowed values are: ALLOW, DENY.

    ``HOST'' is the host. Must be set to "```*```" for Managed Service for Apache Kafka.

    Example acl-entry:
      "principal=User:admin@project.iam.gserviceaccount.com,operation=ALL,permission-type=ALLOW,host=```*```"
  type:
    arg_dict:
      flatten: false
      spec:
      - api_field: principal
        arg_name: principal
        type: str
        required: true
      - api_field: operation
        arg_name: operation
        type: str
        required: true
      - api_field: permissionType
        arg_name: permission-type
        type: str
        required: true
      - api_field: host
        arg_name: host
        type: str
        required: true
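
# Illustrative usage (assumed command path and ACL resource ID format; the
# entry values mirror the example in the help text above):
#   gcloud managed-kafka acls create cluster --cluster=my-cluster --location=us-central1 \
#     --acl-entry=principal=User:admin@project.iam.gserviceaccount.com,operation=ALL,permission-type=ALLOW,host='*'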

acl-entries-from-file:
  arg_name: acl-entries-from-file
  api_field: acl
  type: "googlecloudsdk.calliope.arg_parsers:FileContents:"
  processor: googlecloudsdk.core.yaml:load
  help_text: |
    Path to a JSON or YAML file containing the ACL entries to set on the acl.
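
# A hypothetical acl-entries file, with its shape inferred from the
# acl.aclEntries api_field and the acl-entry spec above (the API schema is
# authoritative):
#   aclEntries:
#   - principal: User:admin@project.iam.gserviceaccount.com
#     operation: ALL
#     permissionType: ALLOW
#     host: "*"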

etag:
  arg_name: etag
  api_field: acl.etag
  type: str
  required: true
  help_text: |
    The etag returned in the response to a previous create or describe
    command. The etag is used for concurrency control: it ensures that the
    client and server agree on the current set of ACL entries in the Kafka
    cluster before they are fully replaced by the update command.
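
# Illustrative read-modify-write flow using the etag (assumed command paths and
# hypothetical names; the etag value comes from the describe output):
#   gcloud managed-kafka acls describe cluster --cluster=my-cluster --location=us-central1
#   gcloud managed-kafka acls update cluster --cluster=my-cluster --location=us-central1 \
#     --acl-entries-from-file=entries.yaml --etag=<etag-from-describe>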

acl-entry-principal:
  api_field: aclEntry.principal
  arg_name: principal
  type: str
  required: true
  help_text: |
    The principal, specified as a Google Cloud account with the Kafka StandardAuthorizer prefix
    "User:". For example: "User:admin@project.iam.gserviceaccount.com".
    Can be the wildcard "User:```*```" to refer to all users.

acl-entry-operation:
  api_field: aclEntry.operation
  arg_name: operation
  type: str
  required: true
  help_text: |
    The operation type. Allowed values are: ALL, READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE,
    CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE.

    See https://kafka.apache.org/documentation/#operations_resources_and_protocols
    for the mapping of operations to Kafka protocols.

acl-entry-permission-type:
  api_field: aclEntry.permissionType
  arg_name: permission-type
  type: str
  default: "ALLOW"
  help_text: |
    The permission type. Allowed values are: ALLOW, DENY.

acl-entry-host:
  api_field: aclEntry.host
  arg_name: host
  type: str
  default: '*'
  help_text: |
    The host. Must be set to "```*```" for Managed Service for Apache Kafka.

full-view:
  api_field: view
  arg_name: full-view
  action: store_true
  default: null
  help_text: |
    Whether the full cluster view is returned. If set, the cluster view includes
    everything, including data fetched from the Kafka cluster source of truth. If not set, the
    API defaults to the basic view, which includes only the basic metadata of the cluster.
  choices:
  - arg_value: true
    enum_value: CLUSTER_VIEW_FULL
  - arg_value: false
    enum_value: CLUSTER_VIEW_BASIC
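
# Illustrative usage (hypothetical names): fetch everything, including data
# read from the Kafka cluster itself, rather than the default basic metadata
# view:
#   gcloud managed-kafka clusters describe my-cluster --location=us-central1 --full-view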