.TH "GCLOUD_ALPHA_CONTAINER_NODE\-POOLS_CREATE" 1



.SH "NAME"
.HP
gcloud alpha container node\-pools create \- create a node pool in a running cluster



.SH "SYNOPSIS"
.HP
\f5gcloud alpha container node\-pools create\fR \fINAME\fR [\fB\-\-accelerator\fR=[\fItype\fR=\fITYPE\fR,[\fIcount\fR=\fICOUNT\fR,\fIgpu\-driver\-version\fR=\fIGPU_DRIVER_VERSION\fR,\fIgpu\-partition\-size\fR=\fIGPU_PARTITION_SIZE\fR,\fIgpu\-sharing\-strategy\fR=\fIGPU_SHARING_STRATEGY\fR,\fImax\-shared\-clients\-per\-gpu\fR=\fIMAX_SHARED_CLIENTS_PER_GPU\fR],...]] [\fB\-\-accelerator\-network\-profile\fR=\fIACCELERATOR_NETWORK_PROFILE\fR] [\fB\-\-additional\-node\-network\fR=[\fInetwork\fR=\fINETWORK_NAME\fR,\fIsubnetwork\fR=\fISUBNETWORK_NAME\fR,...]] [\fB\-\-additional\-pod\-network\fR=[\fIsubnetwork\fR=\fISUBNETWORK_NAME\fR,\fIpod\-ipv4\-range\fR=\fISECONDARY_RANGE_NAME\fR,[\fImax\-pods\-per\-node\fR=\fINUM_PODS\fR],...]] [\fB\-\-async\fR] [\fB\-\-autoscaled\-rollout\-policy\fR=[\fIwait\-for\-drain\-duration\fR=\fIWAIT\-FOR\-DRAIN\-DURATION\fR]] [\fB\-\-boot\-disk\-kms\-key\fR=\fIBOOT_DISK_KMS_KEY\fR] [\fB\-\-boot\-disk\-provisioned\-iops\fR=\fIBOOT_DISK_PROVISIONED_IOPS\fR] [\fB\-\-boot\-disk\-provisioned\-throughput\fR=\fIBOOT_DISK_PROVISIONED_THROUGHPUT\fR] [\fB\-\-cluster\fR=\fICLUSTER\fR] [\fB\-\-confidential\-node\-type\fR=\fICONFIDENTIAL_NODE_TYPE\fR] [\fB\-\-containerd\-config\-from\-file\fR=\fIPATH_TO_FILE\fR] [\fB\-\-data\-cache\-count\fR=\fIDATA_CACHE_COUNT\fR] [\fB\-\-disable\-pod\-cidr\-overprovision\fR] [\fB\-\-disk\-size\fR=\fIDISK_SIZE\fR] [\fB\-\-disk\-type\fR=\fIDISK_TYPE\fR] [\fB\-\-enable\-autoprovisioning\fR] [\fB\-\-enable\-autorepair\fR] [\fB\-\-no\-enable\-autoupgrade\fR] [\fB\-\-enable\-blue\-green\-upgrade\fR] [\fB\-\-enable\-confidential\-nodes\fR] [\fB\-\-enable\-confidential\-storage\fR] [\fB\-\-enable\-gvnic\fR] [\fB\-\-enable\-image\-streaming\fR] [\fB\-\-enable\-insecure\-kubelet\-readonly\-port\fR] [\fB\-\-enable\-nested\-virtualization\fR] [\fB\-\-enable\-private\-nodes\fR] [\fB\-\-enable\-queued\-provisioning\fR] [\fB\-\-enable\-surge\-upgrade\fR] [\fB\-\-flex\-start\fR] [\fB\-\-image\-type\fR=\fIIMAGE_TYPE\fR] [\fB\-\-labels\fR=[\fIKEY\fR=\fIVALUE\fR,...]] [\fB\-\-linux\-sysctls\fR=\fIKEY\fR=\fIVALUE\fR,[\fIKEY\fR=\fIVALUE\fR,...]] [\fB\-\-local\-ssd\-encryption\-mode\fR=\fILOCAL_SSD_ENCRYPTION_MODE\fR] [\fB\-\-logging\-variant\fR=\fILOGGING_VARIANT\fR] [\fB\-\-machine\-type\fR=\fIMACHINE_TYPE\fR,\ \fB\-m\fR\ \fIMACHINE_TYPE\fR] [\fB\-\-max\-pods\-per\-node\fR=\fIMAX_PODS_PER_NODE\fR] [\fB\-\-max\-run\-duration\fR=\fIMAX_RUN_DURATION\fR] [\fB\-\-max\-surge\-upgrade\fR=\fIMAX_SURGE_UPGRADE\fR;\ default=1] [\fB\-\-max\-unavailable\-upgrade\fR=\fIMAX_UNAVAILABLE_UPGRADE\fR] [\fB\-\-metadata\fR=\fIKEY\fR=\fIVALUE\fR,[\fIKEY\fR=\fIVALUE\fR,...]] [\fB\-\-metadata\-from\-file\fR=\fIKEY\fR=\fILOCAL_FILE_PATH\fR,[...]] [\fB\-\-min\-cpu\-platform\fR=\fIPLATFORM\fR] [\fB\-\-network\-performance\-configs\fR=[\fIPROPERTY\fR=\fIVALUE\fR,...]] [\fB\-\-node\-group\fR=\fINODE_GROUP\fR] [\fB\-\-node\-labels\fR=[\fINODE_LABEL\fR,...]] [\fB\-\-node\-locations\fR=\fIZONE\fR,[\fIZONE\fR,...]] [\fB\-\-node\-pool\-soak\-duration\fR=\fINODE_POOL_SOAK_DURATION\fR] [\fB\-\-node\-taints\fR=[\fINODE_TAINT\fR,...]] [\fB\-\-node\-version\fR=\fINODE_VERSION\fR] [\fB\-\-num\-nodes\fR=\fINUM_NODES\fR] [\fB\-\-opportunistic\-maintenance\fR=[\fInode\-idle\-time\fR=\fINODE_IDLE_TIME\fR,\fIwindow\fR=\fIWINDOW\fR,\fImin\-nodes\fR=\fIMIN_NODES\fR,...]] [\fB\-\-performance\-monitoring\-unit\fR=\fIPERFORMANCE_MONITORING_UNIT\fR] [\fB\-\-placement\-policy\fR=\fIPLACEMENT_POLICY\fR] [\fB\-\-placement\-type\fR=\fIPLACEMENT_TYPE\fR] [\fB\-\-preemptible\fR] 
[\fB\-\-resource\-manager\-tags\fR=[\fIKEY\fR=\fIVALUE\fR,...]] [\fB\-\-sandbox\fR=[\fItype\fR=\fITYPE\fR]] [\fB\-\-secondary\-boot\-disk\fR=[\fIdisk\-image\fR=\fIDISK_IMAGE\fR,[\fImode\fR=\fIMODE\fR],...]] [\fB\-\-shielded\-integrity\-monitoring\fR] [\fB\-\-shielded\-secure\-boot\fR] [\fB\-\-sole\-tenant\-min\-node\-cpus\fR=\fISOLE_TENANT_MIN_NODE_CPUS\fR] [\fB\-\-sole\-tenant\-node\-affinity\-file\fR=\fISOLE_TENANT_NODE_AFFINITY_FILE\fR] [\fB\-\-spot\fR] [\fB\-\-standard\-rollout\-policy\fR=[\fIbatch\-node\-count\fR=\fIBATCH_NODE_COUNT\fR,\fIbatch\-percent\fR=\fIBATCH_NODE_PERCENTAGE\fR,\fIbatch\-soak\-duration\fR=\fIBATCH_SOAK_DURATION\fR,...]] [\fB\-\-storage\-pools\fR=\fISTORAGE_POOL\fR,[...]] [\fB\-\-system\-config\-from\-file\fR=\fIPATH_TO_FILE\fR] [\fB\-\-tags\fR=\fITAG\fR,[\fITAG\fR,...]] [\fB\-\-threads\-per\-core\fR=\fITHREADS_PER_CORE\fR] [\fB\-\-tpu\-topology\fR=\fITPU_TOPOLOGY\fR] [\fB\-\-windows\-os\-version\fR=\fIWINDOWS_OS_VERSION\fR] [\fB\-\-workload\-metadata\fR=\fIWORKLOAD_METADATA\fR] [\fB\-\-create\-pod\-ipv4\-range\fR=[\fIKEY\fR=\fIVALUE\fR,...]\ |\ \fB\-\-pod\-ipv4\-range\fR=\fINAME\fR] [\fB\-\-enable\-autoscaling\fR\ \fB\-\-location\-policy\fR=\fILOCATION_POLICY\fR\ \fB\-\-max\-nodes\fR=\fIMAX_NODES\fR\ \fB\-\-min\-nodes\fR=\fIMIN_NODES\fR\ \fB\-\-total\-max\-nodes\fR=\fITOTAL_MAX_NODES\fR\ \fB\-\-total\-min\-nodes\fR=\fITOTAL_MIN_NODES\fR] [\fB\-\-enable\-best\-effort\-provision\fR\ \fB\-\-min\-provision\-nodes\fR=\fIMIN_PROVISION_NODES\fR] [\fB\-\-ephemeral\-storage\fR[=[\fIlocal\-ssd\-count\fR=\fILOCAL\-SSD\-COUNT\fR]]\ |\ \fB\-\-ephemeral\-storage\-local\-ssd\fR[=[\fIcount\fR=\fICOUNT\fR]]\ |\ \fB\-\-local\-nvme\-ssd\-block\fR[=[\fIcount\fR=\fICOUNT\fR]]\ |\ \fB\-\-local\-ssd\-count\fR=\fILOCAL_SSD_COUNT\fR\ |\ \fB\-\-local\-ssd\-volumes\fR=[[\fIcount\fR=\fICOUNT\fR],[\fItype\fR=\fITYPE\fR],[\fIformat\fR=\fIFORMAT\fR],...]] [\fB\-\-location\fR=\fILOCATION\fR\ |\ \fB\-\-region\fR=\fIREGION\fR\ |\ \fB\-\-zone\fR=\fIZONE\fR,\ \fB\-z\fR\ \fIZONE\fR] [\fB\-\-reservation\fR=\fIRESERVATION\fR\ \fB\-\-reservation\-affinity\fR=\fIRESERVATION_AFFINITY\fR] [\fB\-\-scopes\fR=[\fISCOPE\fR,...];\ default="gke\-default"\ \fB\-\-service\-account\fR=\fISERVICE_ACCOUNT\fR] [\fIGCLOUD_WIDE_FLAG\ ...\fR]



.SH "DESCRIPTION"

\fB(ALPHA)\fR \fBgcloud alpha container node\-pools create\fR facilitates the
creation of a node pool in a Google Kubernetes Engine cluster. A variety of
options exist to customize the node configuration and the number of nodes
created.



.SH "EXAMPLES"

To create a new node pool "node\-pool\-1" with the default options in the
cluster "sample\-cluster", run:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=sample\-cluster
.RE

The new node pool will show up in the cluster after all the nodes have been
provisioned.

To create a node pool with 5 nodes, run:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=sample\-cluster \-\-num\-nodes=5
.RE



.SH "POSITIONAL ARGUMENTS"

.RS 2m
.TP 2m
\fINAME\fR

The name of the node pool to create.


.RE
.sp

.SH "FLAGS"

.RS 2m
.TP 2m
\fB\-\-accelerator\fR=[\fItype\fR=\fITYPE\fR,[\fIcount\fR=\fICOUNT\fR,\fIgpu\-driver\-version\fR=\fIGPU_DRIVER_VERSION\fR,\fIgpu\-partition\-size\fR=\fIGPU_PARTITION_SIZE\fR,\fIgpu\-sharing\-strategy\fR=\fIGPU_SHARING_STRATEGY\fR,\fImax\-shared\-clients\-per\-gpu\fR=\fIMAX_SHARED_CLIENTS_PER_GPU\fR],...]

Attaches accelerators (e.g. GPUs) to all nodes.

.RS 2m
.TP 2m
\fBtype\fR
(Required) The specific type (e.g. nvidia\-tesla\-t4 for NVIDIA T4) of
accelerator to attach to the instances. Use \f5gcloud compute accelerator\-types
list\fR to learn about all available accelerator types.

.TP 2m
\fBcount\fR
(Optional) The number of accelerators to attach to the instances. The default
value is 1.

.TP 2m
\fBgpu\-driver\-version\fR
(Optional) The NVIDIA driver version to install. GPU_DRIVER_VERSION must be one
of:

.RS 2m
`default`: Install the default driver version for this GKE version. For GKE version 1.30.1\-gke.1156000 and later, this is the default option.
.RE

.RS 2m
`latest`: Install the latest driver version available for this GKE version.
Can only be used for nodes that use Container\-Optimized OS.
.RE

.RS 2m
`disabled`: Skip automatic driver installation. You must manually install a
driver after you create the cluster. For GKE version 1.30.1\-gke.1156000 and earlier, this is the default option.
To manually install the GPU driver, refer to https://cloud.google.com/kubernetes\-engine/docs/how\-to/gpus#installing_drivers.
.RE

.TP 2m
\fBgpu\-partition\-size\fR
(Optional) The GPU partition size used when running multi\-instance GPUs. For
information about multi\-instance GPUs, refer to:
https://cloud.google.com/kubernetes\-engine/docs/how\-to/gpus\-multi

.TP 2m
\fBgpu\-sharing\-strategy\fR
(Optional) The GPU sharing strategy (e.g. time\-sharing) to use. For information
about GPU sharing, refer to:
https://cloud.google.com/kubernetes\-engine/docs/concepts/timesharing\-gpus

.TP 2m
\fBmax\-shared\-clients\-per\-gpu\fR
(Optional) The max number of containers allowed to share each GPU on the node.
This field is used together with \f5gpu\-sharing\-strategy\fR.

.RE
.sp
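For example, a node pool in which each node has two NVIDIA T4 GPUs attached
might be created as follows (the cluster and pool names, and the count, are
illustrative):

.RS 2m
$ gcloud alpha container node\-pools create gpu\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-accelerator=type=nvidia\-tesla\-t4,count=2
.RE
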
.TP 2m
\fB\-\-accelerator\-network\-profile\fR=\fIACCELERATOR_NETWORK_PROFILE\fR

Accelerator Network Profile that will be used by the node pool.

Currently only the \f5auto\fR value is supported. A compatible Accelerator
machine type needs to be specified with the \f5\-\-machine\-type\fR flag. An
Accelerator Network Profile will be created if it does not exist.

\fIACCELERATOR_NETWORK_PROFILE\fR must be (only one value is supported):
\fBauto\fR.

.TP 2m
\fB\-\-additional\-node\-network\fR=[\fInetwork\fR=\fINETWORK_NAME\fR,\fIsubnetwork\fR=\fISUBNETWORK_NAME\fR,...]

Attach an additional network interface to each node in the pool. This parameter
can be specified up to 7 times.

e.g. \-\-additional\-node\-network network=dataplane,subnetwork=subnet\-dp

.RS 2m
.TP 2m
\fBnetwork\fR
(Required) The network to attach the new interface to.

.TP 2m
\fBsubnetwork\fR
(Required) The subnetwork to attach the new interface to.

.RE
.sp
.TP 2m
\fB\-\-additional\-pod\-network\fR=[\fIsubnetwork\fR=\fISUBNETWORK_NAME\fR,\fIpod\-ipv4\-range\fR=\fISECONDARY_RANGE_NAME\fR,[\fImax\-pods\-per\-node\fR=\fINUM_PODS\fR],...]

Specify the details of a secondary range to be used for an additional pod
network. Not needed if you use a "host" typed NIC from this network. This
parameter can be specified up to 35 times.

e.g. \-\-additional\-pod\-network
subnetwork=subnet\-dp,pod\-ipv4\-range=sec\-range\-blue,max\-pods\-per\-node=8.

.RS 2m
.TP 2m
\fBsubnetwork\fR
(Optional) The name of the subnetwork to link the pod network to. If not
specified, the pod network defaults to the subnet connected to the default
network interface.

.TP 2m
\fBpod\-ipv4\-range\fR
(Required) The name of the secondary range in the subnetwork. The range must
hold at least (2 * MAX_PODS_PER_NODE * MAX_NODES_IN_RANGE) IPs.

.TP 2m
\fBmax\-pods\-per\-node\fR
(Optional) Maximum number of pods per node that can utilize this ipv4\-range.
Defaults to the NodePool value (if specified) or the Cluster value.

.RE
.sp
.TP 2m
\fB\-\-async\fR

Return immediately, without waiting for the operation in progress to complete.

.TP 2m
\fB\-\-autoscaled\-rollout\-policy\fR=[\fIwait\-for\-drain\-duration\fR=\fIWAIT\-FOR\-DRAIN\-DURATION\fR]

Autoscaled rollout policy options for blue\-green upgrade.

.RS 2m
.TP 2m
\fBwait\-for\-drain\-duration\fR
(Optional) Time in seconds to wait after cordoning the blue pool before draining
the nodes.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster  \-\-enable\-blue\-green\-upgrade  \e
    \-\-autoscaled\-rollout\-policy=""
.RE

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster  \-\-enable\-blue\-green\-upgrade  \e
    \-\-autoscaled\-rollout\-policy=wait\-for\-drain\-duration=7200s
.RE

.RE
.sp
.TP 2m
\fB\-\-boot\-disk\-kms\-key\fR=\fIBOOT_DISK_KMS_KEY\fR

The Customer Managed Encryption Key used to encrypt the boot disk attached to
each node in the node pool. This should be of the form
projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
For more information about protecting resources with Cloud KMS Keys please see:
https://cloud.google.com/compute/docs/disks/customer\-managed\-encryption
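
For example, assuming a key in the form shown above (the project, location, key
ring, and key names are placeholders):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-boot\-disk\-kms\-key=projects/my\-project/locations/\e
us\-central1/keyRings/my\-ring/cryptoKeys/my\-key
.RE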

.TP 2m
\fB\-\-boot\-disk\-provisioned\-iops\fR=\fIBOOT_DISK_PROVISIONED_IOPS\fR

Configure the Provisioned IOPS for the node pool boot disks. Only valid for
hyperdisk\-balanced boot disks.

.TP 2m
\fB\-\-boot\-disk\-provisioned\-throughput\fR=\fIBOOT_DISK_PROVISIONED_THROUGHPUT\fR

Configure the Provisioned Throughput for the node pool boot disks. Only valid
for hyperdisk\-balanced boot disks.

.TP 2m
\fB\-\-cluster\fR=\fICLUSTER\fR

The cluster to add the node pool to. Overrides the default
\fBcontainer/cluster\fR property value for this command invocation.

.TP 2m
\fB\-\-confidential\-node\-type\fR=\fICONFIDENTIAL_NODE_TYPE\fR

Enable confidential nodes for the node pool. Enabling Confidential Nodes will
create nodes using Confidential VM
https://cloud.google.com/compute/confidential\-vm/docs/about\-cvm.
\fICONFIDENTIAL_NODE_TYPE\fR must be one of: \fBsev\fR, \fBsev_snp\fR,
\fBtdx\fR, \fBdisabled\fR.
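
For example, to create a node pool whose nodes run as AMD SEV Confidential VMs
(this assumes a machine type that supports SEV is selected):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-confidential\-node\-type=sev
.RE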

.TP 2m
\fB\-\-containerd\-config\-from\-file\fR=\fIPATH_TO_FILE\fR

Path of the YAML file that contains containerd configuration entries like
configuring access to private image registries.

For detailed information on the configuration usage, please refer to
https://cloud.google.com/kubernetes\-engine/docs/how\-to/customize\-containerd\-configuration.

Note: Updating the containerd configuration of an existing cluster or node pool
requires recreation of the existing nodes, which might cause disruptions in
running workloads.

Use a full or relative path to a local file containing the value of
containerd_config.
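
For example, assuming a local file named \f5containerd\-config.yaml\fR that
contains the desired containerd configuration entries:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-containerd\-config\-from\-file=containerd\-config.yaml
.RE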

.TP 2m
\fB\-\-data\-cache\-count\fR=\fIDATA_CACHE_COUNT\fR

Specifies the number of local SSDs to be utilized for GKE Data Cache in the node
pool.

.TP 2m
\fB\-\-disable\-pod\-cidr\-overprovision\fR

Disables pod CIDR overprovisioning on nodes. Pod CIDR overprovisioning is enabled
by default.

.TP 2m
\fB\-\-disk\-size\fR=\fIDISK_SIZE\fR

Size for node VM boot disks in GB. Defaults to 100GB.

.TP 2m
\fB\-\-disk\-type\fR=\fIDISK_TYPE\fR

Type of the node VM boot disk. For version 1.24 and later, defaults to
pd\-balanced. For versions earlier than 1.24, defaults to pd\-standard.
\fIDISK_TYPE\fR must be one of: \fBpd\-standard\fR, \fBpd\-ssd\fR,
\fBpd\-balanced\fR, \fBhyperdisk\-balanced\fR, \fBhyperdisk\-extreme\fR,
\fBhyperdisk\-throughput\fR.
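
For example, to create a node pool whose nodes boot from 200GB SSD persistent
disks (the size and names are illustrative):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-disk\-type=pd\-ssd \-\-disk\-size=200
.RE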

.TP 2m
\fB\-\-enable\-autoprovisioning\fR

Enables Cluster Autoscaler to treat the node pool as if it was autoprovisioned.

Cluster Autoscaler will be able to delete the node pool if it's unneeded.

.TP 2m
\fB\-\-enable\-autorepair\fR

Enable node autorepair feature for a node pool.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-enable\-autorepair
.RE

Node autorepair is enabled by default for node pools using COS, COS_CONTAINERD,
UBUNTU or UBUNTU_CONTAINERD as a base image; use \-\-no\-enable\-autorepair to
disable it.

See https://cloud.google.com/kubernetes\-engine/docs/how\-to/node\-auto\-repair
for more info.

.TP 2m
\fB\-\-enable\-autoupgrade\fR

Sets autoupgrade feature for a node pool.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-enable\-autoupgrade
.RE

See https://cloud.google.com/kubernetes\-engine/docs/node\-auto\-upgrades for
more info.

Enabled by default, use \fB\-\-no\-enable\-autoupgrade\fR to disable.

.TP 2m
\fB\-\-enable\-blue\-green\-upgrade\fR

Changes node pool upgrade strategy to blue\-green upgrade.

.TP 2m
\fB\-\-enable\-confidential\-nodes\fR

Enable confidential nodes for the node pool. Enabling Confidential Nodes will
create nodes using Confidential VM
https://cloud.google.com/compute/confidential\-vm/docs/about\-cvm.

.TP 2m
\fB\-\-enable\-confidential\-storage\fR

Enable confidential storage for the node pool. Enabling Confidential Storage
will create boot disks with confidential mode.

.TP 2m
\fB\-\-enable\-gvnic\fR

Enable the use of GVNIC for this cluster. Requires re\-creation of nodes using
either a node\-pool upgrade or node\-pool creation.

.TP 2m
\fB\-\-enable\-image\-streaming\fR

Specifies whether to enable image streaming on the node pool.

.TP 2m
\fB\-\-enable\-insecure\-kubelet\-readonly\-port\fR

Enables the Kubelet's insecure read\-only port.

To disable the readonly port on a cluster or node\-pool set the flag to
\f5\-\-no\-enable\-insecure\-kubelet\-readonly\-port\fR.

.TP 2m
\fB\-\-enable\-nested\-virtualization\fR

Enables the use of nested virtualization on the node pool. Defaults to
\f5false\fR. Can only be enabled on UBUNTU_CONTAINERD base image or
COS_CONTAINERD base image with version 1.28.4\-gke.1083000 and above.

.TP 2m
\fB\-\-enable\-private\-nodes\fR

Enables provisioning nodes with private IP addresses only.

The control plane still communicates with all nodes through private IP addresses
only, regardless of whether private nodes are enabled or disabled.

.TP 2m
\fB\-\-enable\-queued\-provisioning\fR

Mark the node pool as Queued only. This means that all new nodes can be obtained
only through queuing via the ProvisioningRequest API.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-enable\-queued\-provisioning
... and other required parameters; for more details, see:
https://cloud.google.com/kubernetes\-engine/docs/how\-to/provisioningrequest
.RE

.TP 2m
\fB\-\-enable\-surge\-upgrade\fR

Changes node pool upgrade strategy to surge upgrade.

.TP 2m
\fB\-\-flex\-start\fR

Start the node pool with Flex Start provisioning model.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
     \-\-cluster=example\-cluster \-\-flex\-start
and other required parameters; for more details, see:
https://cloud.google.com/kubernetes\-engine/docs/how\-to/provisioningrequest
.RE

.TP 2m
\fB\-\-image\-type\fR=\fIIMAGE_TYPE\fR

The image type to use for the node pool. Defaults to server\-specified.

Image Type specifies the base OS that the nodes in the node pool will run on. If
an image type is specified, that will be assigned to the node pool and all
future upgrades will use the specified image type. If it is not specified, the
server will pick the default image type.

The default image type and the list of valid image types are available using the
following command.

.RS 2m
$ gcloud container get\-server\-config
.RE

.TP 2m
\fB\-\-labels\fR=[\fIKEY\fR=\fIVALUE\fR,...]

Labels to apply to the Google Cloud resources of node pools in the Kubernetes
Engine cluster. These are unrelated to Kubernetes labels. Warning: Updating this
label causes the node(s) to be recreated.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-labels=label1=value1,label2=value2
.RE

.TP 2m
\fB\-\-linux\-sysctls\fR=\fIKEY\fR=\fIVALUE\fR,[\fIKEY\fR=\fIVALUE\fR,...]

(DEPRECATED) Linux kernel parameters to be applied to all nodes in the new node
pool as well as the pods running on the nodes.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-linux\-sysctls="net.core.somaxconn=1024,net.ipv4.tcp_rmem=4096 \e
87380 6291456"
.RE

The \f5\-\-linux\-sysctls\fR flag is deprecated. Please use
\f5\-\-system\-config\-from\-file\fR instead.

.TP 2m
\fB\-\-local\-ssd\-encryption\-mode\fR=\fILOCAL_SSD_ENCRYPTION_MODE\fR

Encryption mode for Local SSDs on the node pool. \fILOCAL_SSD_ENCRYPTION_MODE\fR
must be one of: \fBSTANDARD_ENCRYPTION\fR, \fBEPHEMERAL_KEY_ENCRYPTION\fR.
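
For example, to request ephemeral key encryption for the node pool's local SSDs
(the machine type or local SSD flags needed to actually provision local SSDs,
documented below, are omitted here for brevity):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-local\-ssd\-encryption\-mode=EPHEMERAL_KEY_ENCRYPTION
.RE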

.TP 2m
\fB\-\-logging\-variant\fR=\fILOGGING_VARIANT\fR

Specifies the logging variant that will be deployed on all the nodes in the node
pool. If the node pool doesn't specify a logging variant, then the logging
variant specified for the cluster will be deployed on all the nodes in the node
pool. Valid logging variants are \f5MAX_THROUGHPUT\fR, \f5DEFAULT\fR.
\fILOGGING_VARIANT\fR must be one of:

.RS 2m
.TP 2m
\fBDEFAULT\fR
\'DEFAULT' variant requests minimal resources but may not guarantee high
throughput.
.TP 2m
\fBMAX_THROUGHPUT\fR
\'MAX_THROUGHPUT' variant requests more node resources and is able to achieve
logging throughput up to 10MB per sec.
.RE
.sp
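For example, to deploy the high\-throughput logging agent on all nodes in the
new node pool:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-logging\-variant=MAX_THROUGHPUT
.RE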


.TP 2m
\fB\-\-machine\-type\fR=\fIMACHINE_TYPE\fR, \fB\-m\fR \fIMACHINE_TYPE\fR

The type of machine to use for nodes. Defaults to e2\-medium. The list of
predefined machine types is available using the following command:

.RS 2m
$ gcloud compute machine\-types list
.RE

You can also specify custom machine types by providing a string with the format
"custom\-CPUS\-RAM" where "CPUS" is the number of virtual CPUs and "RAM" is the
amount of RAM in MiB.

For example, to create a node pool using custom machines with 2 vCPUs and 12 GB
of RAM:

.RS 2m
$ gcloud alpha container node\-pools create high\-mem\-pool \e
    \-\-machine\-type=custom\-2\-12288
.RE

.TP 2m
\fB\-\-max\-pods\-per\-node\fR=\fIMAX_PODS_PER_NODE\fR

The max number of pods per node for this node pool.

This flag sets the maximum number of pods that can be run at the same time on a
node. This will override the value given with the
\-\-default\-max\-pods\-per\-node flag set at the cluster level.

Must be used in conjunction with '\-\-enable\-ip\-alias'.
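
For example, on a VPC\-native cluster, to limit each node in the pool to at
most 30 pods (the value is illustrative):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-max\-pods\-per\-node=30
.RE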

.TP 2m
\fB\-\-max\-run\-duration\fR=\fIMAX_RUN_DURATION\fR

Limit the runtime of each node in the node pool to the specified duration.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-max\-run\-duration=3600s
.RE

.TP 2m
\fB\-\-max\-surge\-upgrade\fR=\fIMAX_SURGE_UPGRADE\fR; default=1

Number of extra (surge) nodes to be created on each upgrade of the node pool.

Specifies the number of extra (surge) nodes to be created during this node
pool's upgrades. For example, running the following command will result in
creating an extra node each time the node pool is upgraded:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-max\-surge\-upgrade=1   \e
    \-\-max\-unavailable\-upgrade=0
.RE

Must be used in conjunction with '\-\-max\-unavailable\-upgrade'.

.TP 2m
\fB\-\-max\-unavailable\-upgrade\fR=\fIMAX_UNAVAILABLE_UPGRADE\fR

Number of nodes that can be unavailable at the same time on each upgrade of the
node pool.

Specifies the number of nodes that can be unavailable at the same time during
this node pool's upgrades. For example, running the following command will
result in having 3 nodes being upgraded in parallel (1 + 2), but always keeping
at least 3 (5 \- 2) nodes available each time the node pool is upgraded:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-num\-nodes=5   \e
    \-\-max\-surge\-upgrade=1 \-\-max\-unavailable\-upgrade=2
.RE

Must be used in conjunction with '\-\-max\-surge\-upgrade'.

.TP 2m
\fB\-\-metadata\fR=\fIKEY\fR=\fIVALUE\fR,[\fIKEY\fR=\fIVALUE\fR,...]

Compute Engine metadata to be made available to the guest operating system
running on nodes within the node pool.

Each metadata entry is a key/value pair separated by an equals sign. Metadata
keys must be unique and less than 128 bytes in length. Values must be less than
or equal to 32,768 bytes in length. The total size of all keys and values must
be less than 512 KB. Multiple arguments can be passed to this flag. For example:

\f5\fI\-\-metadata key\-1=value\-1,key\-2=value\-2,key\-3=value\-3\fR\fR

Additionally, the following keys are reserved for use by Kubernetes Engine:

.RS 2m
.IP "\(em" 2m
\f5\fIcluster\-location\fR\fR
.IP "\(em" 2m
\f5\fIcluster\-name\fR\fR
.IP "\(em" 2m
\f5\fIcluster\-uid\fR\fR
.IP "\(em" 2m
\f5\fIconfigure\-sh\fR\fR
.IP "\(em" 2m
\f5\fIenable\-os\-login\fR\fR
.IP "\(em" 2m
\f5\fIgci\-update\-strategy\fR\fR
.IP "\(em" 2m
\f5\fIgci\-ensure\-gke\-docker\fR\fR
.IP "\(em" 2m
\f5\fIinstance\-template\fR\fR
.IP "\(em" 2m
\f5\fIkube\-env\fR\fR
.IP "\(em" 2m
\f5\fIstartup\-script\fR\fR
.IP "\(em" 2m
\f5\fIuser\-data\fR\fR
.RE
.sp

Google Kubernetes Engine sets the following keys by default:

.RS 2m
.IP "\(em" 2m
\f5\fIserial\-port\-logging\-enable\fR\fR
.RE
.sp

See also Compute Engine's documentation
(https://cloud.google.com/compute/docs/storing\-retrieving\-metadata) on storing
and retrieving instance metadata.

.TP 2m
\fB\-\-metadata\-from\-file\fR=\fIKEY\fR=\fILOCAL_FILE_PATH\fR,[...]

Same as \f5\fI\-\-metadata\fR\fR except that the value for the entry will be
read from a local file.

.TP 2m
\fB\-\-min\-cpu\-platform\fR=\fIPLATFORM\fR

When specified, the nodes for the new node pool will be scheduled on hosts with
the specified CPU platform or a newer one.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-min\-cpu\-platform=PLATFORM
.RE

To list available CPU platforms in given zone, run:

.RS 2m
$ gcloud beta compute zones describe ZONE \e
    \-\-format="value(availableCpuPlatforms)"
.RE

CPU platform selection is available only in selected zones.

.TP 2m
\fB\-\-network\-performance\-configs\fR=[\fIPROPERTY\fR=\fIVALUE\fR,...]

Configures network performance settings for the node pool. If this flag is not
specified, the pool will be created with its default network performance
configuration.

.RS 2m
.TP 2m
\fBtotal\-egress\-bandwidth\-tier\fR
Total egress bandwidth is the available outbound bandwidth from a VM, regardless
of whether the traffic is going to internal IP or external IP destinations. The
following tier values are allowed: [TIER_UNSPECIFIED,TIER_1]

.RE
.sp
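For example, to request Tier 1 total egress bandwidth for the nodes in the new
node pool (this assumes a machine type that supports it):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-network\-performance\-configs=total\-egress\-bandwidth\-tier=TIER_1
.RE
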
.TP 2m
\fB\-\-node\-group\fR=\fINODE_GROUP\fR

Assign instances of this pool to run on the specified Google Compute Engine node
group. This is useful for running workloads on sole tenant nodes.

To see available sole tenant node\-groups, run:

.RS 2m
$ gcloud compute sole\-tenancy node\-groups list
.RE

To create a sole tenant node group, run:

.RS 2m
$ gcloud compute sole\-tenancy node\-groups create [GROUP_NAME]     \e
    \-\-location [ZONE] \-\-node\-template [TEMPLATE_NAME]     \e
    \-\-target\-size [TARGET_SIZE]
.RE

See https://cloud.google.com/compute/docs/nodes for more information on sole
tenancy and node groups.

.TP 2m
\fB\-\-node\-labels\fR=[\fINODE_LABEL\fR,...]

Applies the given Kubernetes labels on all nodes in the new node pool.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-node\-labels=label1=value1,label2=value2
.RE

Updating the node pool's \-\-node\-labels flag applies the labels to the
Kubernetes Node objects for existing nodes in\-place; it does not re\-create or
replace nodes. New nodes, including ones created by resizing or re\-creating
nodes, will have these labels on the Kubernetes API Node object. The labels can
be used in the \f5nodeSelector\fR field. See
https://kubernetes.io/docs/concepts/scheduling\-eviction/assign\-pod\-node/ for
examples.

Note that Kubernetes labels, intended to associate cluster components and
resources with one another and manage resource lifecycles, are different from
Google Kubernetes Engine labels that are used for the purpose of tracking
billing and usage information.

.TP 2m
\fB\-\-node\-locations\fR=\fIZONE\fR,[\fIZONE\fR,...]

The set of zones in which the node pool's nodes should be located.

Multiple locations can be specified, separated by commas. For example:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=sample\-cluster \e
    \-\-node\-locations=us\-central1\-a,us\-central1\-b
.RE

.TP 2m
\fB\-\-node\-pool\-soak\-duration\fR=\fINODE_POOL_SOAK_DURATION\fR

Time in seconds to be spent waiting during blue\-green upgrade before deleting
the blue pool and completing the upgrade.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster  \-\-node\-pool\-soak\-duration=600s
.RE

.TP 2m
\fB\-\-node\-taints\fR=[\fINODE_TAINT\fR,...]

Applies the given Kubernetes taints on all nodes in the new node pool, which can
be used with tolerations for pod scheduling.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-node\-taints=key1=val1:NoSchedule,key2=val2:PreferNoSchedule
.RE

To read more about node\-taints, see
https://cloud.google.com/kubernetes\-engine/docs/node\-taints.

.TP 2m
\fB\-\-node\-version\fR=\fINODE_VERSION\fR

The Kubernetes version to use for nodes. Defaults to server\-specified.

The default Kubernetes version is available using the following command.

.RS 2m
$ gcloud container get\-server\-config
.RE

.TP 2m
\fB\-\-num\-nodes\fR=\fINUM_NODES\fR

The number of nodes in the node pool in each of the cluster's zones. Defaults to
3.

Exception: when \f5\-\-tpu\-topology\fR is specified for multi\-host TPU machine
types the number of nodes will be defaulted to \f5(product of topology)/(# of
chips per VM)\fR.

.TP 2m
\fB\-\-opportunistic\-maintenance\fR=[\fInode\-idle\-time\fR=\fINODE_IDLE_TIME\fR,\fIwindow\fR=\fIWINDOW\fR,\fImin\-nodes\fR=\fIMIN_NODES\fR,...]

Opportunistic maintenance options.

node\-idle\-time: Time to be spent waiting for node to be idle before starting
maintenance, ending with 's'. Example: "3.5s"

window: The window of time that opportunistic maintenance can run, ending with
\'s'. Example: A setting of 14 days (1209600s) implies that opportunistic
maintenance can only be run in the 2 weeks leading up to the scheduled
maintenance date. Setting 28 days (2419200s) allows opportunistic maintenance to
run at any time in the scheduled maintenance window.

min\-nodes: Minimum number of nodes in the node pool to be available during the
opportunistic triggered maintenance.

.RS 2m
$ gcloud alpha container node\-pools create example\-cluster  \e
    \-\-opportunistic\-maintenance=node\-idle\-time=600s,window=600s,\e
min\-nodes=2
.RE

.TP 2m
\fB\-\-performance\-monitoring\-unit\fR=\fIPERFORMANCE_MONITORING_UNIT\fR

Sets the Performance Monitoring Unit level. Valid values are
\f5architectural\fR, \f5standard\fR and \f5enhanced\fR.
\fIPERFORMANCE_MONITORING_UNIT\fR must be one of:

.RS 2m
.TP 2m
\fBarchitectural\fR
Enables architectural PMU events tied to non last level cache (LLC) events.
.TP 2m
\fBenhanced\fR
Enables most documented core/L2 and LLC PMU events.
.TP 2m
\fBstandard\fR
Enables most documented core/L2 PMU events.
.RE
.sp


.TP 2m
\fB\-\-placement\-policy\fR=\fIPLACEMENT_POLICY\fR

Indicates the desired resource policy to use.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-placement\-policy my\-placement
.RE

.TP 2m
\fB\-\-placement\-type\fR=\fIPLACEMENT_TYPE\fR

Placement type allows you to define the type of node placement within this
node pool.

\f5UNSPECIFIED\fR \- No requirements on the placement of nodes. This is the
default option.

\f5COMPACT\fR \- GKE will attempt to place the nodes in close proximity to
each other. This helps to reduce the communication latency between the nodes,
but imposes additional limitations on the node pool size.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-placement\-type=COMPACT
.RE

\fIPLACEMENT_TYPE\fR must be one of: \fBUNSPECIFIED\fR, \fBCOMPACT\fR.

.TP 2m
\fB\-\-preemptible\fR

Create nodes using preemptible VM instances in the new node pool.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-preemptible
.RE

New nodes, including ones created by resize or recreate, will use preemptible VM
instances. See https://cloud.google.com/kubernetes\-engine/docs/preemptible\-vm
for more information on how to use Preemptible VMs with Kubernetes Engine.

.TP 2m
\fB\-\-resource\-manager\-tags\fR=[\fIKEY\fR=\fIVALUE\fR,...]

Applies the specified comma\-separated resource manager tags that have the
GCE_FIREWALL purpose to all nodes in the new node pool.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create example\-node\-pool \e
    \-\-resource\-manager\-tags=tagKeys/1234=tagValues/2345
$ gcloud alpha container node\-pools create example\-node\-pool \e
    \-\-resource\-manager\-tags=my\-project/key1=value1
$ gcloud alpha container node\-pools create example\-node\-pool \e
    \-\-resource\-manager\-tags=12345/key1=value1,23456/key2=value2
$ gcloud alpha container node\-pools create example\-node\-pool \e
    \-\-resource\-manager\-tags=
.RE

All nodes, including nodes that are resized or re\-created, will have the
specified tags on the corresponding Instance object in the Compute Engine API.
You can reference these tags in network firewall policy rules. For instructions,
see https://cloud.google.com/firewall/docs/use\-tags\-for\-firewalls.

.TP 2m
\fB\-\-sandbox\fR=[\fItype\fR=\fITYPE\fR]

Enables the requested sandbox on all nodes in the node pool.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-sandbox="type=gvisor"
.RE

The only supported type is 'gvisor'.

.TP 2m
\fB\-\-secondary\-boot\-disk\fR=[\fIdisk\-image\fR=\fIDISK_IMAGE\fR,[\fImode\fR=\fIMODE\fR],...]

Attaches secondary boot disks to all nodes.

.RS 2m
.TP 2m
\fBdisk\-image\fR
(Required) The full resource path to the source disk image to create the
secondary boot disks from.

.TP 2m
\fBmode\fR
(Optional) The configuration mode for the secondary boot disks. The default
value is "CONTAINER_IMAGE_CACHE".

.RE
.sp
.TP 2m
\fB\-\-shielded\-integrity\-monitoring\fR

Enables monitoring and attestation of the boot integrity of the instance. The
attestation is performed against the integrity policy baseline. This baseline is
initially derived from the implicitly trusted boot image when the instance is
created.

.TP 2m
\fB\-\-shielded\-secure\-boot\fR

The instance will boot with secure boot enabled.

.TP 2m
\fB\-\-sole\-tenant\-min\-node\-cpus\fR=\fISOLE_TENANT_MIN_NODE_CPUS\fR

An integer value that specifies the minimum number of vCPUs that each sole tenant
node must have to use CPU overcommit. If not specified, the CPU overcommit
feature is disabled.

.TP 2m
\fB\-\-sole\-tenant\-node\-affinity\-file\fR=\fISOLE_TENANT_NODE_AFFINITY_FILE\fR

JSON/YAML file containing the configuration of the desired sole tenant nodes
that this node pool could be backed by. These rules filter the nodes according
to their node affinity labels. A node's affinity labels come from the node
template of the group the node is in.

The file should contain a list of JSON/YAML objects. For an example, see
https://cloud.google.com/compute/docs/nodes/provisioning\-sole\-tenant\-vms#configure_node_affinity_labels.
The following list describes the fields:

.RS 2m
.TP 2m
\fBkey\fR
Corresponds to the node affinity label keys of the Node resource.
.TP 2m
\fBoperator\fR
Specifies the node selection type. Must be one of: \f5IN\fR: Requires Compute
Engine to seek for matched nodes. \f5NOT_IN\fR: Requires Compute Engine to avoid
certain nodes.
.TP 2m
\fBvalues\fR
Optional. A list of values which correspond to the node affinity label values of
the Node resource.

.RE
.sp
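For example, a minimal affinity file (here \f5affinity.yaml\fR, using an
illustrative custom affinity label named \f5workload\fR) might look like:

.RS 2m
\- key: workload
  operator: IN
  values:
  \- gke\-sole\-tenant
.RE

It could then be passed to the command as follows:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-sole\-tenant\-node\-affinity\-file=affinity.yaml
.RE
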
.TP 2m
\fB\-\-spot\fR

Create nodes using spot VM instances in the new node pool.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-spot
.RE

New nodes, including ones created by resize or recreate, will use spot VM
instances.

.TP 2m
\fB\-\-standard\-rollout\-policy\fR=[\fIbatch\-node\-count\fR=\fIBATCH_NODE_COUNT\fR,\fIbatch\-percent\fR=\fIBATCH_NODE_PERCENTAGE\fR,\fIbatch\-soak\-duration\fR=\fIBATCH_SOAK_DURATION\fR,...]

Standard rollout policy options for blue\-green upgrade.

Batch sizes are specified by one of batch\-node\-count or batch\-percent. The
duration between batches is specified by batch\-soak\-duration.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster  \e
    \-\-standard\-rollout\-policy=batch\-node\-count=3,\e
batch\-soak\-duration=60s
.RE

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster  \e
    \-\-standard\-rollout\-policy=batch\-percent=0.3,\e
batch\-soak\-duration=60s
.RE

.TP 2m
\fB\-\-storage\-pools\fR=\fISTORAGE_POOL\fR,[...]

A list of storage pools where the node pool's boot disks will be provisioned.

STORAGE_POOL must be in the format
projects/project/zones/zone/storagePools/storagePool
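
For example (the project, zone, and storage pool names are placeholders):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-storage\-pools=projects/my\-project/zones/us\-central1\-a/\e
storagePools/my\-storage\-pool
.RE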

.TP 2m
\fB\-\-system\-config\-from\-file\fR=\fIPATH_TO_FILE\fR

Path of the YAML/JSON file that contains the node configuration, including Linux
kernel parameters (sysctls) and kubelet configs.

Examples:

.RS 2m
kubeletConfig:
  cpuManagerPolicy: static
  memoryManager:
    policy: Static
  topologyManager:
    policy: BestEffort
    scope: pod
linuxConfig:
  sysctl:
    net.core.somaxconn: '2048'
    net.ipv4.tcp_rmem: '4096 87380 6291456'
  hugepageConfig:
    hugepage_size2m: '1024'
    hugepage_size1g: '2'
  swapConfig:
    enabled: true
    bootDiskProfile:
      swapSizeGib: 8
  cgroupMode: 'CGROUP_MODE_V2'
.RE

List of supported kubelet configs in 'kubeletConfig'.


.TS
tab(	);
l(36)B l(90)B
l(36) l(90).
KEY	VALUE
cpuManagerPolicy	either 'static' or 'none'
cpuCFSQuota	true or false (enabled by default)
cpuCFSQuotaPeriod	interval (e.g., '100ms'. The value must be between 1ms and 1 second, inclusive.)
memoryManager	specify memory manager policy
topologyManager	specify topology manager policy and scope
podPidsLimit	integer (The value must be greater than or equal to 1024 and less than 4194304.)
containerLogMaxSize	positive number plus unit suffix (e.g., '100Mi', '0.2Gi'. The value must be between 10Mi and 500Mi, inclusive.)
containerLogMaxFiles	integer (The value must be between [2, 10].)
imageGcLowThresholdPercent	integer (The value must be between [10, 85], and lower than imageGcHighThresholdPercent.)
imageGcHighThresholdPercent	integer (The value must be between [10, 85], and greater than imageGcLowThresholdPercent.)
imageMinimumGcAge	interval (e.g., '100s', '1m'. The value must be less than '2m'.)
imageMaximumGcAge	interval (e.g., '100s', '1m'. The value must be greater than imageMinimumGcAge.)
evictionSoft	specify eviction soft thresholds
evictionSoftGracePeriod	specify eviction soft grace period
evictionMinimumReclaim	specify eviction minimum reclaim thresholds
evictionMaxPodGracePeriodSeconds	integer (Max grace period for pod termination during eviction, in seconds. The value must be between [0, 300].)
allowedUnsafeSysctls	list of sysctls (Allowlisted groups: 'kernel.shm*', 'kernel.msg*', 'kernel.sem', 'fs.mqueue.*', and 'net.*', and sysctls under the groups.)
singleProcessOomKill	true or false
maxParallelImagePulls	integer (The value must be between [2, 5].)
.TE


List of supported keys in memoryManager in 'kubeletConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
policy	either 'Static' or 'None'
.TE

List of supported keys in topologyManager in 'kubeletConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
policy	either 'none' or 'best-effort' or 'single-numa-node' or 'restricted'
scope	either 'pod' or 'container'
.TE

List of supported keys in evictionSoft in 'kubeletConfig'.


.TS
tab(	);
l(25)B l(93)B
l(25) l(93).
KEY	VALUE
memoryAvailable	quantity (e.g., '100Mi', '1Gi'. Represents the amount of memory available before soft eviction. The value must be at least 100Mi and less than 50% of the node's memory.)
nodefsAvailable	percentage (e.g., '20%'. Represents the nodefs available before soft eviction. The value must be between 10% and 50%, inclusive.)
nodefsInodesFree	percentage (e.g., '20%'. Represents the nodefs inodes free before soft eviction. The value must be between 5% and 50%, inclusive.)
imagefsAvailable	percentage (e.g., '20%'. Represents the imagefs available before soft eviction. The value must be between 15% and 50%, inclusive.)
imagefsInodesFree	percentage (e.g., '20%'. Represents the imagefs inodes free before soft eviction. The value must be between 5% and 50%, inclusive.)
pidAvailable	percentage (e.g., '20%'. Represents the pid available before soft eviction. The value must be between 10% and 50%, inclusive.)
.TE

List of supported keys in evictionSoftGracePeriod in 'kubeletConfig'.


.TS
tab(	);
l(25)B l(93)B
l(25) l(93).
KEY	VALUE
memoryAvailable	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
nodefsAvailable	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
nodefsInodesFree	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
imagefsAvailable	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
imagefsInodesFree	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
pidAvailable	duration (e.g., '30s', '1m'. The grace period for soft eviction for this resource. The value must be positive and no more than '5m'.)
.TE

List of supported keys in evictionMinimumReclaim in 'kubeletConfig'.


.TS
tab(	);
l(25)B l(93)B
l(25) l(93).
KEY	VALUE
memoryAvailable	percentage (e.g., '5%'. Represents the minimum reclaim threshold for memory available. The value must be positive and no more than 10%.)
nodefsAvailable	percentage (e.g., '5%'. Represents the minimum reclaim threshold for nodefs available. The value must be positive and no more than 10%.)
nodefsInodesFree	percentage (e.g., '5%'. Represents the minimum reclaim threshold for nodefs inodes free. The value must be positive and no more than 10%.)
imagefsAvailable	percentage (e.g., '5%'. Represents the minimum reclaim threshold for imagefs available. The value must be positive and no more than 10%.)
imagefsInodesFree	percentage (e.g., '5%'. Represents the minimum reclaim threshold for imagefs inodes free. The value must be positive and no more than 10%.)
pidAvailable	percentage (e.g., '5%'. Represents the minimum reclaim threshold for pid available. The value must be positive and no more than 10%.)
.TE


List of supported sysctls in 'linuxConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
net.core.netdev_max_backlog	Any positive integer, less than 2147483647
net.core.rmem_default	Must be between [2304, 2147483647]
net.core.rmem_max	Must be between [2304, 2147483647]
net.core.wmem_default	Must be between [4608, 2147483647]
net.core.wmem_max	Must be between [4608, 2147483647]
net.core.optmem_max	Any positive integer, less than 2147483647
net.core.somaxconn	Must be between [128, 2147483647]
net.ipv4.tcp_rmem	Any positive integer tuple
net.ipv4.tcp_wmem	Any positive integer tuple
net.ipv4.tcp_tw_reuse	Must be {0, 1, 2}
net.ipv4.tcp_mtu_probing	Must be {0, 1, 2}
net.ipv4.tcp_max_orphans	Must be between [16384, 262144]
net.ipv4.tcp_max_tw_buckets	Must be between [4096, 2147483647]
net.ipv4.tcp_syn_retries	Must be between [1, 127]
net.ipv4.tcp_ecn	Must be {0, 1, 2}
net.ipv4.tcp_congestion_control	Must be string containing only letters and numbers
net.netfilter.nf_conntrack_max	Must be between [65536, 4194304]
net.netfilter.nf_conntrack_buckets	Must be between [65536, 524288]. Recommend setting: nf_conntrack_max = nf_conntrack_buckets * 4
net.netfilter.nf_conntrack_tcp_timeout_close_wait	Must be between [60, 3600]
net.netfilter.nf_conntrack_tcp_timeout_time_wait	Must be between [1, 600]
net.netfilter.nf_conntrack_tcp_timeout_established	Must be between [600, 86400]
net.netfilter.nf_conntrack_acct	Must be {0, 1}
kernel.shmmni	Must be between [4096, 32768]
kernel.shmmax	Must be between [0, 18446744073692774399]
kernel.shmall	Must be between [0, 18446744073692774399]
kernel.perf_event_paranoid	Must be {-1, 0, 1, 2, 3}
kernel.sched_rt_runtime_us	Must be [-1, 1000000]
kernel.softlockup_panic	Must be {0, 1}
kernel.yama.ptrace_scope	Must be {0, 1, 2, 3}
kernel.kptr_restrict	Must be {0, 1, 2}
kernel.dmesg_restrict	Must be {0, 1}
kernel.sysrq	Must be [0, 511]
fs.aio-max-nr	Must be between [65536, 4194304]
fs.file-max	Must be between [104857, 67108864]
fs.inotify.max_user_instances	Must be between [8192, 1048576]
fs.inotify.max_user_watches	Must be between [8192, 1048576]
fs.nr_open	Must be between [1048576, 2147483584]
vm.dirty_background_ratio	Must be between [1, 100]
vm.dirty_background_bytes	Must be between [0, 68719476736]
vm.dirty_expire_centisecs	Must be between [0, 6000]
vm.dirty_ratio	Must be between [1, 100]
vm.dirty_bytes	Must be between [0, 68719476736]
vm.dirty_writeback_centisecs	Must be between [0, 1000]
vm.max_map_count	Must be between [65536, 2147483647]
vm.overcommit_memory	Must be one of {0, 1, 2}
vm.overcommit_ratio	Must be between [0, 100]
vm.vfs_cache_pressure	Must be between [0, 100]
vm.swappiness	Must be between [0, 200]
vm.watermark_scale_factor	Must be between [10, 3000]
vm.min_free_kbytes	Must be between [67584, 1048576]
.TE

List of supported hugepage size in 'hugepageConfig'.


.TS
tab(	);
l(16)B l(45)B
l(16) l(45).
KEY	VALUE
hugepage_size2m	Number of 2M huge pages, any positive integer
hugepage_size1g	Number of 1G huge pages, any positive integer
.TE

List of supported keys in 'swapConfig' under 'linuxConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
enabled	boolean
encryptionConfig	specify encryption settings for the swap space
bootDiskProfile	specify swap on the node's boot disk
ephemeralLocalSsdProfile	specify swap on the local SSD shared with pod ephemeral storage
dedicatedLocalSsdProfile	specify swap on a new, separate local NVMe SSD exclusively for swap
.TE

List of supported keys in 'encryptionConfig' under 'swapConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
disabled	boolean
.TE

List of supported keys in 'bootDiskProfile' under 'swapConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
swapSizeGib	integer
swapSizePercent	integer
.TE

List of supported keys in 'ephemeralLocalSsdProfile' under 'swapConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
swapSizeGib	integer
swapSizePercent	integer
.TE

List of supported keys in 'dedicatedLocalSsdProfile' under 'swapConfig'.


.TS
tab(	);
l(42)B l(42)B
l(42) l(42).
KEY	VALUE
diskCount	integer
.TE


The total allocated hugepage size should not exceed 60% of the available memory
on the node. For example, c2d\-highcpu\-4 has 8GB of memory, so the total
allocated 2M and 1G hugepages should not exceed 8GB * 0.6 = 4.8GB.

1G hugepages are only available in the following machine families: c3, m2, c2d,
c3d, h3, m3, a2, a3, g2.

Supported values for 'cgroupMode' under 'linuxConfig'.

.RS 2m
.IP "\(em" 2m
\f5CGROUP_MODE_V1\fR: Use cgroupv1 on the node pool.
.IP "\(em" 2m
\f5CGROUP_MODE_V2\fR: Use cgroupv2 on the node pool.
.IP "\(em" 2m
\f5CGROUP_MODE_UNSPECIFIED\fR: Use the default GKE cgroup configuration.
.RE
.sp

Supported values for 'transparentHugepageEnabled' under 'linuxConfig' which
controls transparent hugepage support for anonymous memory.

.RS 2m
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_ENABLED_ALWAYS\fR: Transparent hugepage is enabled
system wide.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_ENABLED_MADVISE\fR: Transparent hugepage is enabled
inside MADV_HUGEPAGE regions. This is the default kernel configuration.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_ENABLED_NEVER\fR: Transparent hugepage is disabled.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_ENABLED_UNSPECIFIED\fR: Default value. GKE will not
modify the kernel configuration.
.RE
.sp

Supported values for 'transparentHugepageDefrag' under 'linuxConfig' which
defines the transparent hugepage defrag configuration on the node.

.RS 2m
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_ALWAYS\fR: It means that an application
requesting THP will stall on allocation failure and directly reclaim pages and
compact memory in an effort to allocate a THP immediately.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_DEFER\fR: It means that an application will wake
kswapd in the background to reclaim pages and wake kcompactd to compact memory
so that THP is available in the near future. It is the responsibility of
khugepaged to then install the THP pages later.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_DEFER_WITH_MADVISE\fR: It means that an
application will enter direct reclaim and compaction like always, but only for
regions that have used madvise(MADV_HUGEPAGE); all other regions will wake
kswapd in the background to reclaim pages and wake kcompactd to compact memory
so that THP is available in the near future.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_MADVISE\fR: It means that an application will
enter direct reclaim and compaction like always, but only for regions that have
used madvise(MADV_HUGEPAGE); all other regions will wake kswapd in the
background to reclaim pages and wake kcompactd to compact memory so that THP is
available in the near future.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_NEVER\fR: It means that an application will never
enter direct reclaim or compaction.
.IP "\(em" 2m
\f5TRANSPARENT_HUGEPAGE_DEFRAG_UNSPECIFIED\fR: Default value. GKE will not
modify the kernel configuration.
.RE
.sp

Note: updating the system configuration of an existing node pool requires
recreation of the nodes, which might cause a disruption.

Use a full or relative path to a local file containing the value of
system_config.

.TP 2m
\fB\-\-tags\fR=\fITAG\fR,[\fITAG\fR,...]

Applies the given Compute Engine tags (comma separated) on all nodes in the new
node\-pool. Example:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-tags=tag1,tag2
.RE

New nodes, including ones created by resize or recreate, will have these tags on
the Compute Engine API instance object and can be used in firewall rules. See
https://cloud.google.com/sdk/gcloud/reference/compute/firewall\-rules/create for
examples.

.TP 2m
\fB\-\-threads\-per\-core\fR=\fITHREADS_PER_CORE\fR

The number of visible threads per physical core for each node. To disable
simultaneous multithreading (SMT) set this to 1.
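
For example, to disable SMT on all nodes in the new node pool:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-threads\-per\-core=1
.RE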

.TP 2m
\fB\-\-tpu\-topology\fR=\fITPU_TOPOLOGY\fR

The desired physical topology for the PodSlice.

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-tpu\-topology=TPU_TOPOLOGY
.RE

.TP 2m
\fB\-\-windows\-os\-version\fR=\fIWINDOWS_OS_VERSION\fR

Specifies the Windows Server Image to use when creating a Windows node pool.
Valid variants are "ltsc2019" and "ltsc2022", which select the LTSC2019 or
LTSC2022 server image respectively. If the node pool doesn't specify a Windows
Server image OS version, LTSC2019 will be used by default.
\fIWINDOWS_OS_VERSION\fR must be one of: \fBltsc2019\fR, \fBltsc2022\fR.

.TP 2m
\fB\-\-workload\-metadata\fR=\fIWORKLOAD_METADATA\fR

Type of metadata server available to pods running in the node pool.
\fIWORKLOAD_METADATA\fR must be one of:

.RS 2m
.TP 2m
\fBEXPOSED\fR
[DEPRECATED] Pods running in this node pool have access to the node's underlying
Compute Engine Metadata Server.
.TP 2m
\fBGCE_METADATA\fR
Pods running in this node pool have access to the node's underlying Compute
Engine Metadata Server.
.TP 2m
\fBGKE_METADATA\fR
Run the Kubernetes Engine Metadata Server on this node. The Kubernetes Engine
Metadata Server exposes a metadata API to workloads that is compatible with the
V1 Compute Metadata APIs exposed by the Compute Engine and App Engine Metadata
Servers. This feature can only be enabled if Workload Identity is enabled at the
cluster level.
.TP 2m
\fBGKE_METADATA_SERVER\fR
[DEPRECATED] Run the Kubernetes Engine Metadata Server on this node. The
Kubernetes Engine Metadata Server exposes a metadata API to workloads that is
compatible with the V1 Compute Metadata APIs exposed by the Compute Engine and
App Engine Metadata Servers. This feature can only be enabled if Workload
Identity is enabled at the cluster level.
.TP 2m
\fBSECURE\fR
[DEPRECATED] Prevents pods not in hostNetwork from accessing certain VM
metadata, specifically kube\-env, which contains Kubelet credentials, and the
instance identity token. This is a temporary security solution available while
the bootstrapping process for cluster nodes is being redesigned with significant
security improvements. This feature is scheduled to be deprecated in the future
and later removed.
.RE
.sp
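For example, to run the GKE metadata server on the new node pool (this assumes
Workload Identity is already enabled at the cluster level):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-workload\-metadata=GKE_METADATA
.RE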


.TP 2m

At most one of these can be specified:


.RS 2m
.TP 2m
\fB\-\-create\-pod\-ipv4\-range\fR=[\fIKEY\fR=\fIVALUE\fR,...]

Create a new pod range for the node pool. The name and range of the pod range
can be customized via optional \f5\fIname\fR\fR and \f5\fIrange\fR\fR keys.

\f5\fIname\fR\fR specifies the name of the secondary range to be created.

\f5\fIrange\fR\fR specifies the IP range for the new secondary range. This can
either be a netmask size (e.g. "/20") or a CIDR range (e.g. "10.0.0.0/20"). If a
netmask size is specified, the IP is automatically taken from the free space in
the cluster's network.

Must be used in VPC native clusters. Cannot be used in conjunction with the
\f5\-\-pod\-ipv4\-range\fR option.

Examples:

Create a new pod range with a default name and size.

.RS 2m
$ gcloud alpha container node\-pools create \-\-create\-pod\-ipv4\-range ""
.RE

Create a new pod range named \f5\fImy\-range\fR\fR with netmask of size
\f5\fI21\fR\fR.

.RS 2m
$ gcloud alpha container node\-pools create \e
    \-\-create\-pod\-ipv4\-range name=my\-range,range=/21
.RE

Create a new pod range with a default name with the primary range of
\f5\fI10.100.0.0/16\fR\fR.

.RS 2m
$ gcloud alpha container node\-pools create \e
    \-\-create\-pod\-ipv4\-range range=10.100.0.0/16
.RE

Create a new pod range with the name \f5\fImy\-range\fR\fR with a default range.

.RS 2m
$ gcloud alpha container node\-pools create \e
    \-\-create\-pod\-ipv4\-range name=my\-range
.RE


.TP 2m
\fB\-\-pod\-ipv4\-range\fR=\fINAME\fR

Set the pod range to be used as the source for pod IPs for the pods in this node
pool. NAME must be the name of an existing subnetwork secondary range in the
subnetwork for this cluster.

Must be used in VPC native clusters. Cannot be used with
\f5\-\-create\-pod\-ipv4\-range\fR.

Examples:

Specify a pod range called \f5\fIother\-range\fR\fR

.RS 2m
$ gcloud alpha container node\-pools create \e
    \-\-pod\-ipv4\-range other\-range
.RE

.RE
.sp
.TP 2m

Cluster autoscaling


.RS 2m
.TP 2m
\fB\-\-enable\-autoscaling\fR

Enables autoscaling for a node pool.

Enables autoscaling in the node pool specified by \-\-node\-pool or the default
node pool if \-\-node\-pool is not provided. If not already set, \-\-max\-nodes or
\-\-total\-max\-nodes must also be set.
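
For example, to create a node pool that autoscales between 1 and 5 nodes per
zone (the bounds are illustrative):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-enable\-autoscaling \e
    \-\-min\-nodes=1 \-\-max\-nodes=5
.RE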

.TP 2m
\fB\-\-location\-policy\fR=\fILOCATION_POLICY\fR

Location policy specifies the algorithm used when scaling up the node pool.

.RS 2m
.IP "\(bu" 2m
\f5BALANCED\fR \- Is a best effort policy that aims to balance the sizes of
available zones.
.IP "\(bu" 2m
\f5ANY\fR \- Instructs the cluster autoscaler to prioritize utilization of
unused reservations, and reduces preemption risk for Spot VMs.
.RE
.sp

\fILOCATION_POLICY\fR must be one of: \fBBALANCED\fR, \fBANY\fR.

.TP 2m
\fB\-\-max\-nodes\fR=\fIMAX_NODES\fR

Maximum number of nodes per zone in the node pool.

Maximum number of nodes per zone to which the node pool specified by
\-\-node\-pool (or default node pool if unspecified) can scale. Ignored unless
\-\-enable\-autoscaling is also specified.

.TP 2m
\fB\-\-min\-nodes\fR=\fIMIN_NODES\fR

Minimum number of nodes per zone in the node pool.

Minimum number of nodes per zone to which the node pool specified by
\-\-node\-pool (or default node pool if unspecified) can scale. Ignored unless
\-\-enable\-autoscaling is also specified.

.TP 2m
\fB\-\-total\-max\-nodes\fR=\fITOTAL_MAX_NODES\fR

Maximum number of all nodes in the node pool.

Maximum number of all nodes to which the node pool specified by \-\-node\-pool
(or default node pool if unspecified) can scale. Ignored unless
\-\-enable\-autoscaling is also specified.

.TP 2m
\fB\-\-total\-min\-nodes\fR=\fITOTAL_MIN_NODES\fR

Minimum number of all nodes in the node pool.

Minimum number of all nodes to which the node pool specified by \-\-node\-pool
(or default node pool if unspecified) can scale. Ignored unless
\-\-enable\-autoscaling is also specified.
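
For example, a sketch (names are placeholders) that bounds the total size of
the node pool across all zones rather than per zone:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-enable\-autoscaling \e
    \-\-total\-min\-nodes=2 \-\-total\-max\-nodes=10
.RE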

.RE
.sp
.TP 2m

Specifies the minimum number of nodes to be created when best effort
provisioning is enabled.



.RS 2m
.TP 2m
\fB\-\-enable\-best\-effort\-provision\fR

Enables best effort provisioning for nodes.

.TP 2m
\fB\-\-min\-provision\-nodes\fR=\fIMIN_PROVISION_NODES\fR

Specifies the minimum number of nodes to be provisioned during creation.
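
For example, a sketch (names and counts are placeholders) that requests 10
nodes but allows creation to proceed once at least 5 are provisioned:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-num\-nodes=10 \e
    \-\-enable\-best\-effort\-provision \-\-min\-provision\-nodes=5
.RE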

.RE
.sp
.TP 2m

At most one of these can be specified:


.RS 2m
.TP 2m
\fB\-\-ephemeral\-storage\fR[=[\fIlocal\-ssd\-count\fR=\fILOCAL\-SSD\-COUNT\fR]]

Parameters for the ephemeral storage filesystem. If unspecified, ephemeral
storage is backed by the boot disk.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-ephemeral\-storage local\-ssd\-count=2
.RE

\'local\-ssd\-count' specifies the number of local SSDs to use to back ephemeral
storage. Local SSDs use NVMe interfaces. For first\- and second\-generation
machine types, a nonzero count field is required for local SSDs to be
configured. For third\-generation machine types, the count field is optional
because the count is inferred from the machine type.

See https://cloud.google.com/compute/docs/disks/local\-ssd for more information.

.TP 2m
\fB\-\-ephemeral\-storage\-local\-ssd\fR[=[\fIcount\fR=\fICOUNT\fR]]

Parameters for the ephemeral storage filesystem. If unspecified, ephemeral
storage is backed by the boot disk.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-ephemeral\-storage\-local\-ssd count=2
.RE

\'count' specifies the number of local SSDs to use to back ephemeral storage.
Local SSDs use NVMe interfaces. For first\- and second\-generation machine
types, a nonzero count field is required for local SSDs to be configured. For
third\-generation machine types, the count field is optional because the count
is inferred from the machine type.

See https://cloud.google.com/compute/docs/disks/local\-ssd for more information.

.TP 2m
\fB\-\-local\-nvme\-ssd\-block\fR[=[\fIcount\fR=\fICOUNT\fR]]

Adds the requested local SSDs on all nodes in default node pool(s) in the new
cluster.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-local\-nvme\-ssd\-block count=2
.RE

\'count' must be between 1 and 8.

New nodes, including ones created by resize or recreate, will have these local
SSDs.

For first\- and second\-generation machine types, a nonzero count field is
required for local SSDs to be configured. For third\-generation machine types,
the count field is optional because the count is inferred from the machine
type.

See https://cloud.google.com/compute/docs/disks/local\-ssd for more information.

.TP 2m
\fB\-\-local\-ssd\-count\fR=\fILOCAL_SSD_COUNT\fR

\-\-local\-ssd\-count is the equivalent of using \-\-local\-ssd\-volumes with
type=scsi,format=fs

The number of local SSD disks to provision on each node, formatted and mounted
in the filesystem.

Local SSDs have a fixed 375 GB capacity per device. The number of disks that can
be attached to an instance is limited by the maximum number of disks available
on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local\-ssd for more information.
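
For example, a sketch (names are placeholders) provisioning two formatted and
mounted local SSDs per node:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-local\-ssd\-count=2
.RE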

.TP 2m
\fB\-\-local\-ssd\-volumes\fR=[[\fIcount\fR=\fICOUNT\fR],[\fItype\fR=\fITYPE\fR],[\fIformat\fR=\fIFORMAT\fR],...]

Adds the requested local SSDs on all nodes in default node pool(s) in the new
cluster.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-local\-ssd\-volumes count=2,type=nvme,format=fs
.RE

\'count' must be between 1 and 8.

\'type' must be either scsi or nvme.

\'format' must be either fs or block.

New nodes, including ones created by resize or recreate, will have these local
SSDs.

Local SSDs have a fixed 375 GB capacity per device. The number of disks that can
be attached to an instance is limited by the maximum number of disks available
on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local\-ssd for more information.

.RE
.sp
.TP 2m

At most one of these can be specified:


.RS 2m
.TP 2m
\fB\-\-location\fR=\fILOCATION\fR

Compute zone or region (e.g. us\-central1\-a or us\-central1) for the cluster.
Overrides the default compute/region or compute/zone value for this command
invocation. Prefer using this flag over the \-\-region or \-\-zone flags.
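
For example, a sketch (names are placeholders) targeting a zonal cluster:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \-\-location=us\-central1\-a
.RE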

.TP 2m
\fB\-\-region\fR=\fIREGION\fR

Compute region (e.g. us\-central1) for a regional cluster. Overrides the default
compute/region property value for this command invocation.

.TP 2m
\fB\-\-zone\fR=\fIZONE\fR, \fB\-z\fR \fIZONE\fR

Compute zone (e.g. us\-central1\-a) for a zonal cluster. Overrides the default
compute/zone property value for this command invocation.

.RE
.sp
.TP 2m

Specifies the reservation for the node pool.


.RS 2m
.TP 2m
\fB\-\-reservation\fR=\fIRESERVATION\fR

The name of the reservation, required when
\f5\-\-reservation\-affinity=specific\fR.

.TP 2m
\fB\-\-reservation\-affinity\fR=\fIRESERVATION_AFFINITY\fR

The type of the reservation for the node pool. \fIRESERVATION_AFFINITY\fR must
be one of: \fBany\fR, \fBnone\fR, \fBspecific\fR.
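
For example, a sketch (the reservation and other names are placeholders) that
pins the node pool to a specific reservation:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-reservation\-affinity=specific \-\-reservation=my\-reservation
.RE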

.RE
.sp
.TP 2m

Options to specify the node identity.


.RS 2m
.TP 2m

Scopes options.


.RS 2m
.TP 2m
\fB\-\-scopes\fR=[\fISCOPE\fR,...]; default="gke\-default"

Specifies scopes for the node instances.

Examples:

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-scopes=https://www.googleapis.com/auth/devstorage.read_only
.RE

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-scopes=bigquery,storage\-rw,compute\-ro
.RE

Multiple scopes can be specified, separated by commas. Various scopes are
automatically added based on feature usage. Such scopes are not added if an
equivalent scope already exists.

.RS 2m
.IP "\(em" 2m
\f5monitoring\-write\fR: always added to ensure metrics can be written
.IP "\(em" 2m
\f5logging\-write\fR: added if Cloud Logging is enabled
(\f5\-\-enable\-cloud\-logging\fR/\f5\-\-logging\fR)
.IP "\(em" 2m
\f5monitoring\fR: added if Cloud Monitoring is enabled
(\f5\-\-enable\-cloud\-monitoring\fR/\f5\-\-monitoring\fR)
.IP "\(em" 2m
\f5gke\-default\fR: added for Autopilot clusters that use the default service
account
.IP "\(em" 2m
\f5cloud\-platform\fR: added for Autopilot clusters that use any other service
account
.RE
.sp

SCOPE can be either the full URI of the scope or an alias. \fBDefault\fR scopes
are assigned to all instances. Available aliases are:


.TS
tab(	);
lB lB
l l.
Alias	URI
bigquery	https://www.googleapis.com/auth/bigquery
cloud-platform	https://www.googleapis.com/auth/cloud-platform
cloud-source-repos	https://www.googleapis.com/auth/source.full_control
cloud-source-repos-ro	https://www.googleapis.com/auth/source.read_only
compute-ro	https://www.googleapis.com/auth/compute.readonly
compute-rw	https://www.googleapis.com/auth/compute
datastore	https://www.googleapis.com/auth/datastore
default	https://www.googleapis.com/auth/devstorage.read_only
	https://www.googleapis.com/auth/logging.write
	https://www.googleapis.com/auth/monitoring.write
	https://www.googleapis.com/auth/pubsub
	https://www.googleapis.com/auth/service.management.readonly
	https://www.googleapis.com/auth/servicecontrol
	https://www.googleapis.com/auth/trace.append
gke-default	https://www.googleapis.com/auth/devstorage.read_only
	https://www.googleapis.com/auth/logging.write
	https://www.googleapis.com/auth/monitoring
	https://www.googleapis.com/auth/service.management.readonly
	https://www.googleapis.com/auth/servicecontrol
	https://www.googleapis.com/auth/trace.append
logging-write	https://www.googleapis.com/auth/logging.write
monitoring	https://www.googleapis.com/auth/monitoring
monitoring-read	https://www.googleapis.com/auth/monitoring.read
monitoring-write	https://www.googleapis.com/auth/monitoring.write
pubsub	https://www.googleapis.com/auth/pubsub
service-control	https://www.googleapis.com/auth/servicecontrol
service-management	https://www.googleapis.com/auth/service.management.readonly
sql (deprecated)	https://www.googleapis.com/auth/sqlservice
sql-admin	https://www.googleapis.com/auth/sqlservice.admin
storage-full	https://www.googleapis.com/auth/devstorage.full_control
storage-ro	https://www.googleapis.com/auth/devstorage.read_only
storage-rw	https://www.googleapis.com/auth/devstorage.read_write
taskqueue	https://www.googleapis.com/auth/taskqueue
trace	https://www.googleapis.com/auth/trace.append
userinfo-email	https://www.googleapis.com/auth/userinfo.email
.TE

DEPRECATION WARNING: the https://www.googleapis.com/auth/sqlservice account
scope and the \f5sql\fR alias do not provide SQL instance management
capabilities and have been deprecated. Please use
https://www.googleapis.com/auth/sqlservice.admin or \f5sql\-admin\fR to manage
your Google SQL Service instances.

.RE
.sp
.TP 2m
\fB\-\-service\-account\fR=\fISERVICE_ACCOUNT\fR

The Google Cloud Platform service account to be used by the node VMs. If a
service account is specified, the cloud\-platform and userinfo.email scopes are
used. If no service account is specified, the project default service account
is used.
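
For example, a sketch (the service account address is a placeholder):

.RS 2m
$ gcloud alpha container node\-pools create node\-pool\-1 \e
    \-\-cluster=example\-cluster \e
    \-\-service\-account=node\-sa@my\-project.iam.gserviceaccount.com
.RE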


.RE
.RE
.sp

.SH "GCLOUD WIDE FLAGS"

These flags are available to all commands: \-\-access\-token\-file, \-\-account,
\-\-billing\-project, \-\-configuration, \-\-flags\-file, \-\-flatten,
\-\-format, \-\-help, \-\-impersonate\-service\-account, \-\-log\-http,
\-\-project, \-\-quiet, \-\-trace\-token, \-\-user\-output\-enabled,
\-\-verbosity.

Run \fB$ gcloud help\fR for details.



.SH "NOTES"

This command is currently in alpha and might change without notice. If this
command fails with API permission errors despite specifying the correct project,
you might be trying to access an API with an invitation\-only early access
allowlist. These variants are also available:

.RS 2m
$ gcloud container node\-pools create
.RE

.RS 2m
$ gcloud beta container node\-pools create
.RE