Package dataproc
Variables
var ClusterOperationStatus_State_name = map[int32]string{
    0: "UNKNOWN",
    1: "PENDING",
    2: "RUNNING",
    3: "DONE",
}
var ClusterOperationStatus_State_value = map[string]int32{
    "UNKNOWN": 0,
    "PENDING": 1,
    "RUNNING": 2,
    "DONE":    3,
}
var ClusterStatus_State_name = map[int32]string{
    0: "UNKNOWN",
    1: "CREATING",
    2: "RUNNING",
    3: "ERROR",
    4: "DELETING",
    5: "UPDATING",
}
var ClusterStatus_State_value = map[string]int32{
    "UNKNOWN":  0,
    "CREATING": 1,
    "RUNNING":  2,
    "ERROR":    3,
    "DELETING": 4,
    "UPDATING": 5,
}
var JobStatus_State_name = map[int32]string{
    0: "STATE_UNSPECIFIED",
    1: "PENDING",
    8: "SETUP_DONE",
    2: "RUNNING",
    3: "CANCEL_PENDING",
    7: "CANCEL_STARTED",
    4: "CANCELLED",
    5: "DONE",
    6: "ERROR",
}
var JobStatus_State_value = map[string]int32{
    "STATE_UNSPECIFIED": 0,
    "PENDING":           1,
    "SETUP_DONE":        8,
    "RUNNING":           2,
    "CANCEL_PENDING":    3,
    "CANCEL_STARTED":    7,
    "CANCELLED":         4,
    "DONE":              5,
    "ERROR":             6,
}
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
    0: "ALL",
    1: "ACTIVE",
    2: "NON_ACTIVE",
}
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
    "ALL":        0,
    "ACTIVE":     1,
    "NON_ACTIVE": 2,
}
var LoggingConfig_Level_name = map[int32]string{
    0: "LEVEL_UNSPECIFIED",
    1: "ALL",
    2: "TRACE",
    3: "DEBUG",
    4: "INFO",
    5: "WARN",
    6: "ERROR",
    7: "FATAL",
    8: "OFF",
}
var LoggingConfig_Level_value = map[string]int32{
    "LEVEL_UNSPECIFIED": 0,
    "ALL":               1,
    "TRACE":             2,
    "DEBUG":             3,
    "INFO":              4,
    "WARN":              5,
    "ERROR":             6,
    "FATAL":             7,
    "OFF":               8,
}
func RegisterClusterControllerServer ¶
func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)
func RegisterJobControllerServer ¶
func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)
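A typical use of these helpers, sketched below, is to register concrete service implementations on a grpc.Server (the package is imported here under its name dataproc; the full import path is elided). The listener address and the myClusterController/myJobController types are hypothetical stand-ins for your own implementations of the two server interfaces.

    lis, err := net.Listen("tcp", ":8080") // address is illustrative
    if err != nil {
        log.Fatal(err)
    }
    s := grpc.NewServer()
    // Both types below are hypothetical; they must implement
    // ClusterControllerServer and JobControllerServer respectively.
    dataproc.RegisterClusterControllerServer(s, &myClusterController{})
    dataproc.RegisterJobControllerServer(s, &myJobController{})
    log.Fatal(s.Serve(lis))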
type CancelJobRequest ¶
A request to cancel a job.
type CancelJobRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The job ID.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
func (*CancelJobRequest) Descriptor ¶
func (*CancelJobRequest) Descriptor() ([]byte, []int)
func (*CancelJobRequest) GetJobId ¶
func (m *CancelJobRequest) GetJobId() string
func (*CancelJobRequest) GetProjectId ¶
func (m *CancelJobRequest) GetProjectId() string
func (*CancelJobRequest) GetRegion ¶
func (m *CancelJobRequest) GetRegion() string
func (*CancelJobRequest) ProtoMessage ¶
func (*CancelJobRequest) ProtoMessage()
func (*CancelJobRequest) Reset ¶
func (m *CancelJobRequest) Reset()
func (*CancelJobRequest) String ¶
func (m *CancelJobRequest) String() string
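As a minimal sketch, all three required fields are set before the request is passed to JobControllerClient.CancelJob (client construction elided; the project, region, and job IDs are placeholders):

    req := &dataproc.CancelJobRequest{
        ProjectId: "my-project",  // placeholder
        Region:    "us-central1", // placeholder
        JobId:     "my-job-id",   // placeholder
    }
    job, err := jobClient.CancelJob(ctx, req)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(job.GetStatus().GetState())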
type Cluster ¶
Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.
type Cluster struct {
    // [Required] The Google Cloud Platform project ID that the cluster belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The cluster name. Cluster names within a project must be
    // unique. Names of deleted clusters can be reused.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // [Required] The cluster config. Note that Cloud Dataproc may set
    // default values, and values may change when clusters are updated.
    Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"`
    // [Output-only] Cluster status.
    Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
    // [Output-only] The previous cluster status.
    StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
    // [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc
    // generates this value when it creates the cluster.
    ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
}
func (*Cluster) Descriptor ¶
func (*Cluster) Descriptor() ([]byte, []int)
func (*Cluster) GetClusterName ¶
func (m *Cluster) GetClusterName() string
func (*Cluster) GetClusterUuid ¶
func (m *Cluster) GetClusterUuid() string
func (*Cluster) GetConfig ¶
func (m *Cluster) GetConfig() *ClusterConfig
func (*Cluster) GetProjectId ¶
func (m *Cluster) GetProjectId() string
func (*Cluster) GetStatus ¶
func (m *Cluster) GetStatus() *ClusterStatus
func (*Cluster) GetStatusHistory ¶
func (m *Cluster) GetStatusHistory() []*ClusterStatus
func (*Cluster) ProtoMessage ¶
func (*Cluster) ProtoMessage()
func (*Cluster) Reset ¶
func (m *Cluster) Reset()
func (*Cluster) String ¶
func (m *Cluster) String() string
type ClusterConfig ¶
The cluster config.
type ClusterConfig struct {
    // [Optional] A Google Cloud Storage staging bucket used for sharing generated
    // SSH keys and config. If you do not specify a staging bucket, Cloud
    // Dataproc will determine an appropriate Cloud Storage location (US,
    // ASIA, or EU) for your cluster's staging bucket according to the Google
    // Compute Engine zone where your cluster is deployed, and then it will create
    // and manage this project-level, per-location bucket for you.
    ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"`
    // [Required] The shared Google Compute Engine config settings for
    // all instances in a cluster.
    GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"`
    // [Optional] The Google Compute Engine config settings for
    // the master instance in a cluster.
    MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"`
    // [Optional] The Google Compute Engine config settings for
    // worker instances in a cluster.
    WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"`
    // [Optional] The Google Compute Engine config settings for
    // additional worker instances in a cluster.
    SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"`
    // [Optional] The config settings for software inside the cluster.
    SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"`
    // [Optional] Commands to execute on each node after config is
    // completed. By default, executables are run on master and all worker nodes.
    // You can test a node's <code>role</code> metadata to run an executable on
    // a master or worker node, as shown below using `curl` (you can also use `wget`):
    //
    //     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
    //     if [[ "${ROLE}" == 'Master' ]]; then
    //         ... master specific actions ...
    //     else
    //         ... worker specific actions ...
    //     fi
    InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions" json:"initialization_actions,omitempty"`
}
func (*ClusterConfig) Descriptor ¶
func (*ClusterConfig) Descriptor() ([]byte, []int)
func (*ClusterConfig) GetConfigBucket ¶
func (m *ClusterConfig) GetConfigBucket() string
func (*ClusterConfig) GetGceClusterConfig ¶
func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig
func (*ClusterConfig) GetInitializationActions ¶
func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction
func (*ClusterConfig) GetMasterConfig ¶
func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig
func (*ClusterConfig) GetSecondaryWorkerConfig ¶
func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) GetSoftwareConfig ¶
func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig
func (*ClusterConfig) GetWorkerConfig ¶
func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) ProtoMessage ¶
func (*ClusterConfig) ProtoMessage()
func (*ClusterConfig) Reset ¶
func (m *ClusterConfig) Reset()
func (*ClusterConfig) String ¶
func (m *ClusterConfig) String() string
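The nesting of these config messages is easiest to see in a literal. The sketch below builds a small cluster definition; the project ID, zone URI, and machine type URI are placeholders, and fields not shown keep their service-side defaults.

    cluster := &dataproc.Cluster{
        ProjectId:   "my-project", // placeholder
        ClusterName: "example-cluster",
        Config: &dataproc.ClusterConfig{
            GceClusterConfig: &dataproc.GceClusterConfig{
                ZoneUri: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a",
            },
            MasterConfig: &dataproc.InstanceGroupConfig{
                NumInstances:   1, // master instance groups must be set to 1
                MachineTypeUri: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/machineTypes/n1-standard-2",
            },
            WorkerConfig: &dataproc.InstanceGroupConfig{
                NumInstances: 2,
            },
        },
    }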
type ClusterControllerClient ¶
type ClusterControllerClient interface {
    // Creates a cluster in a project.
    CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
    // Updates a cluster in a project.
    UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
    // Deletes a cluster in a project.
    DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
    // Gets the resource representation for a cluster in a project.
    GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
    // Lists all regions/{region}/clusters in a project.
    ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
    // Gets cluster diagnostic information.
    // After the operation completes, the Operation.response field
    // contains `DiagnoseClusterOutputLocation`.
    DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
}
func NewClusterControllerClient ¶
func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient
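A sketch of dialing the service and issuing a CreateCluster call. The dial address and options (credentials in particular) are assumptions elided here, and the returned google_longrunning.Operation must still be polled or watched for completion.

    conn, err := grpc.Dial(address, opts...) // endpoint and authenticated dial options elided
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    client := dataproc.NewClusterControllerClient(conn)
    op, err := client.CreateCluster(ctx, &dataproc.CreateClusterRequest{
        ProjectId: "my-project", // placeholder
        Region:    "us-central1",
        Cluster:   cluster, // e.g. the literal shown under ClusterConfig
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(op.GetName()) // operation name, used to track completion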
type ClusterControllerServer ¶
type ClusterControllerServer interface {
    // Creates a cluster in a project.
    CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error)
    // Updates a cluster in a project.
    UpdateCluster(context.Context, *UpdateClusterRequest) (*google_longrunning.Operation, error)
    // Deletes a cluster in a project.
    DeleteCluster(context.Context, *DeleteClusterRequest) (*google_longrunning.Operation, error)
    // Gets the resource representation for a cluster in a project.
    GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
    // Lists all regions/{region}/clusters in a project.
    ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
    // Gets cluster diagnostic information.
    // After the operation completes, the Operation.response field
    // contains `DiagnoseClusterOutputLocation`.
    DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*google_longrunning.Operation, error)
}
type ClusterOperationMetadata ¶
Metadata describing the operation.
type ClusterOperationMetadata struct {
    // [Output-only] Name of the cluster for the operation.
    ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // [Output-only] Cluster UUID for the operation.
    ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
    // [Output-only] Current operation status.
    Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"`
    // [Output-only] The previous operation status.
    StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
    // [Output-only] The operation type.
    OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"`
    // [Output-only] Short description of operation.
    Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"`
}
func (*ClusterOperationMetadata) Descriptor ¶
func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)
func (*ClusterOperationMetadata) GetClusterName ¶
func (m *ClusterOperationMetadata) GetClusterName() string
func (*ClusterOperationMetadata) GetClusterUuid ¶
func (m *ClusterOperationMetadata) GetClusterUuid() string
func (*ClusterOperationMetadata) GetDescription ¶
func (m *ClusterOperationMetadata) GetDescription() string
func (*ClusterOperationMetadata) GetOperationType ¶
func (m *ClusterOperationMetadata) GetOperationType() string
func (*ClusterOperationMetadata) GetStatus ¶
func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus
func (*ClusterOperationMetadata) GetStatusHistory ¶
func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus
func (*ClusterOperationMetadata) ProtoMessage ¶
func (*ClusterOperationMetadata) ProtoMessage()
func (*ClusterOperationMetadata) Reset ¶
func (m *ClusterOperationMetadata) Reset()
func (*ClusterOperationMetadata) String ¶
func (m *ClusterOperationMetadata) String() string
type ClusterOperationStatus ¶
The status of the operation.
type ClusterOperationStatus struct {
    // [Output-only] A message containing the operation state.
    State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"`
    // [Output-only] A message containing the detailed operation state.
    InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"`
    // [Output-only] A message containing any operation metadata details.
    Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"`
    // [Output-only] The time this state was entered.
    StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
}
func (*ClusterOperationStatus) Descriptor ¶
func (*ClusterOperationStatus) Descriptor() ([]byte, []int)
func (*ClusterOperationStatus) GetDetails ¶
func (m *ClusterOperationStatus) GetDetails() string
func (*ClusterOperationStatus) GetInnerState ¶
func (m *ClusterOperationStatus) GetInnerState() string
func (*ClusterOperationStatus) GetState ¶
func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State
func (*ClusterOperationStatus) GetStateStartTime ¶
func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf3.Timestamp
func (*ClusterOperationStatus) ProtoMessage ¶
func (*ClusterOperationStatus) ProtoMessage()
func (*ClusterOperationStatus) Reset ¶
func (m *ClusterOperationStatus) Reset()
func (*ClusterOperationStatus) String ¶
func (m *ClusterOperationStatus) String() string
type ClusterOperationStatus_State ¶
The operation state.
type ClusterOperationStatus_State int32
const (
    // Unused.
    ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
    // The operation has been created.
    ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
    // The operation is running.
    ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
    // The operation is done; either cancelled or completed.
    ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)
func (ClusterOperationStatus_State) EnumDescriptor ¶
func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)
func (ClusterOperationStatus_State) String ¶
func (x ClusterOperationStatus_State) String() string
type ClusterStatus ¶
The status of a cluster and its instances.
type ClusterStatus struct {
    // [Output-only] The cluster's state.
    State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
    // [Output-only] Optional details of cluster's state.
    Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
    // [Output-only] Time when this state was entered.
    StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
}
func (*ClusterStatus) Descriptor ¶
func (*ClusterStatus) Descriptor() ([]byte, []int)
func (*ClusterStatus) GetDetail ¶
func (m *ClusterStatus) GetDetail() string
func (*ClusterStatus) GetState ¶
func (m *ClusterStatus) GetState() ClusterStatus_State
func (*ClusterStatus) GetStateStartTime ¶
func (m *ClusterStatus) GetStateStartTime() *google_protobuf3.Timestamp
func (*ClusterStatus) ProtoMessage ¶
func (*ClusterStatus) ProtoMessage()
func (*ClusterStatus) Reset ¶
func (m *ClusterStatus) Reset()
func (*ClusterStatus) String ¶
func (m *ClusterStatus) String() string
type ClusterStatus_State ¶
The cluster state.
type ClusterStatus_State int32
const (
    // The cluster state is unknown.
    ClusterStatus_UNKNOWN ClusterStatus_State = 0
    // The cluster is being created and set up. It is not ready for use.
    ClusterStatus_CREATING ClusterStatus_State = 1
    // The cluster is currently running and healthy. It is ready for use.
    ClusterStatus_RUNNING ClusterStatus_State = 2
    // The cluster encountered an error. It is not ready for use.
    ClusterStatus_ERROR ClusterStatus_State = 3
    // The cluster is being deleted. It cannot be used.
    ClusterStatus_DELETING ClusterStatus_State = 4
    // The cluster is being updated. It continues to accept and process jobs.
    ClusterStatus_UPDATING ClusterStatus_State = 5
)
func (ClusterStatus_State) EnumDescriptor ¶
func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)
func (ClusterStatus_State) String ¶
func (x ClusterStatus_State) String() string
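Since cluster creation is asynchronous, one simple (if coarse) pattern is to poll GetCluster until the state leaves CREATING; the interval and error handling below are illustrative only, and client is the ClusterControllerClient constructed above.

    for {
        c, err := client.GetCluster(ctx, &dataproc.GetClusterRequest{
            ProjectId:   "my-project", // placeholder
            Region:      "us-central1",
            ClusterName: "example-cluster",
        })
        if err != nil {
            log.Fatal(err)
        }
        if s := c.GetStatus().GetState(); s != dataproc.ClusterStatus_CREATING {
            fmt.Println("cluster reached state", s) // RUNNING on success, ERROR on failure
            break
        }
        time.Sleep(10 * time.Second)
    }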
type CreateClusterRequest ¶
A request to create a cluster.
type CreateClusterRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The cluster to create.
    Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
}
func (*CreateClusterRequest) Descriptor ¶
func (*CreateClusterRequest) Descriptor() ([]byte, []int)
func (*CreateClusterRequest) GetCluster ¶
func (m *CreateClusterRequest) GetCluster() *Cluster
func (*CreateClusterRequest) GetProjectId ¶
func (m *CreateClusterRequest) GetProjectId() string
func (*CreateClusterRequest) GetRegion ¶
func (m *CreateClusterRequest) GetRegion() string
func (*CreateClusterRequest) ProtoMessage ¶
func (*CreateClusterRequest) ProtoMessage()
func (*CreateClusterRequest) Reset ¶
func (m *CreateClusterRequest) Reset()
func (*CreateClusterRequest) String ¶
func (m *CreateClusterRequest) String() string
type DeleteClusterRequest ¶
A request to delete a cluster.
type DeleteClusterRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
func (*DeleteClusterRequest) Descriptor ¶
func (*DeleteClusterRequest) Descriptor() ([]byte, []int)
func (*DeleteClusterRequest) GetClusterName ¶
func (m *DeleteClusterRequest) GetClusterName() string
func (*DeleteClusterRequest) GetProjectId ¶
func (m *DeleteClusterRequest) GetProjectId() string
func (*DeleteClusterRequest) GetRegion ¶
func (m *DeleteClusterRequest) GetRegion() string
func (*DeleteClusterRequest) ProtoMessage ¶
func (*DeleteClusterRequest) ProtoMessage()
func (*DeleteClusterRequest) Reset ¶
func (m *DeleteClusterRequest) Reset()
func (*DeleteClusterRequest) String ¶
func (m *DeleteClusterRequest) String() string
type DeleteJobRequest ¶
A request to delete a job.
type DeleteJobRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The job ID.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
func (*DeleteJobRequest) Descriptor ¶
func (*DeleteJobRequest) Descriptor() ([]byte, []int)
func (*DeleteJobRequest) GetJobId ¶
func (m *DeleteJobRequest) GetJobId() string
func (*DeleteJobRequest) GetProjectId ¶
func (m *DeleteJobRequest) GetProjectId() string
func (*DeleteJobRequest) GetRegion ¶
func (m *DeleteJobRequest) GetRegion() string
func (*DeleteJobRequest) ProtoMessage ¶
func (*DeleteJobRequest) ProtoMessage()
func (*DeleteJobRequest) Reset ¶
func (m *DeleteJobRequest) Reset()
func (*DeleteJobRequest) String ¶
func (m *DeleteJobRequest) String() string
type DiagnoseClusterRequest ¶
A request to collect cluster diagnostic information.
type DiagnoseClusterRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
func (*DiagnoseClusterRequest) Descriptor ¶
func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)
func (*DiagnoseClusterRequest) GetClusterName ¶
func (m *DiagnoseClusterRequest) GetClusterName() string
func (*DiagnoseClusterRequest) GetProjectId ¶
func (m *DiagnoseClusterRequest) GetProjectId() string
func (*DiagnoseClusterRequest) GetRegion ¶
func (m *DiagnoseClusterRequest) GetRegion() string
func (*DiagnoseClusterRequest) ProtoMessage ¶
func (*DiagnoseClusterRequest) ProtoMessage()
func (*DiagnoseClusterRequest) Reset ¶
func (m *DiagnoseClusterRequest) Reset()
func (*DiagnoseClusterRequest) String ¶
func (m *DiagnoseClusterRequest) String() string
type DiagnoseClusterResults ¶
The location of diagnostic output.
type DiagnoseClusterResults struct {
    // [Output-only] The Google Cloud Storage URI of the diagnostic output.
    // The output report is a plain text file with a summary of collected
    // diagnostics.
    OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"`
}
func (*DiagnoseClusterResults) Descriptor ¶
func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)
func (*DiagnoseClusterResults) GetOutputUri ¶
func (m *DiagnoseClusterResults) GetOutputUri() string
func (*DiagnoseClusterResults) ProtoMessage ¶
func (*DiagnoseClusterResults) ProtoMessage()
func (*DiagnoseClusterResults) Reset ¶
func (m *DiagnoseClusterResults) Reset()
func (*DiagnoseClusterResults) String ¶
func (m *DiagnoseClusterResults) String() string
type DiskConfig ¶
Specifies the config of disk options for a group of VM instances.
type DiskConfig struct {
    // [Optional] Size in GB of the boot disk (default is 500GB).
    BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"`
    // [Optional] Number of attached SSDs, from 0 to 4 (default is 0).
    // If SSDs are not attached, the boot disk is used to store runtime logs and
    // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
    // If one or more SSDs are attached, this runtime bulk
    // data is spread across them, and the boot disk contains only basic
    // config and installed binaries.
    NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds" json:"num_local_ssds,omitempty"`
}
func (*DiskConfig) Descriptor ¶
func (*DiskConfig) Descriptor() ([]byte, []int)
func (*DiskConfig) GetBootDiskSizeGb ¶
func (m *DiskConfig) GetBootDiskSizeGb() int32
func (*DiskConfig) GetNumLocalSsds ¶
func (m *DiskConfig) GetNumLocalSsds() int32
func (*DiskConfig) ProtoMessage ¶
func (*DiskConfig) ProtoMessage()
func (*DiskConfig) Reset ¶
func (m *DiskConfig) Reset()
func (*DiskConfig) String ¶
func (m *DiskConfig) String() string
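For instance, a config that overrides the boot disk default and attaches local SSDs might look like this sketch (the values are illustrative):

    disk := &dataproc.DiskConfig{
        BootDiskSizeGb: 100, // overrides the 500 GB default
        NumLocalSsds:   2,   // HDFS and runtime bulk data spread across two SSDs
    }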
type GceClusterConfig ¶
Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.
type GceClusterConfig struct {
    // [Required] The zone where the Google Compute Engine cluster will be located.
    // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`.
    ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"`
    // [Optional] The Google Compute Engine network to be used for machine
    // communications. Cannot be specified with subnetwork_uri. If neither
    // `network_uri` nor `subnetwork_uri` is specified, the "default" network of
    // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
    // [Using Subnetworks](/compute/docs/subnetworks) for more information).
    // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
    NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"`
    // [Optional] The Google Compute Engine subnetwork to be used for machine
    // communications. Cannot be specified with network_uri.
    // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`.
    SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"`
    // [Optional] If true, all instances in the cluster will only have internal IP
    // addresses. By default, clusters are not restricted to internal IP addresses,
    // and will have ephemeral external IP addresses assigned to each instance.
    // This `internal_ip_only` restriction can only be enabled for subnetwork
    // enabled networks, and all off-cluster dependencies must be configured to be
    // accessible without external IP addresses.
    InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"`
    // [Optional] The URIs of service account scopes to be included in Google
    // Compute Engine instances. The following base set of scopes is always
    // included:
    //
    // * https://www.googleapis.com/auth/cloud.useraccounts.readonly
    // * https://www.googleapis.com/auth/devstorage.read_write
    // * https://www.googleapis.com/auth/logging.write
    //
    // If no scopes are specified, the following defaults are also provided:
    //
    // * https://www.googleapis.com/auth/bigquery
    // * https://www.googleapis.com/auth/bigtable.admin.table
    // * https://www.googleapis.com/auth/bigtable.data
    // * https://www.googleapis.com/auth/devstorage.full_control
    ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"`
    // The Google Compute Engine tags to add to all instances (see
    // [Labeling instances](/compute/docs/label-or-tag-resources#labeling_instances)).
    Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
    // The Google Compute Engine metadata entries to add to all instances (see
    // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (*GceClusterConfig) Descriptor ¶
func (*GceClusterConfig) Descriptor() ([]byte, []int)
func (*GceClusterConfig) GetInternalIpOnly ¶
func (m *GceClusterConfig) GetInternalIpOnly() bool
func (*GceClusterConfig) GetMetadata ¶
func (m *GceClusterConfig) GetMetadata() map[string]string
func (*GceClusterConfig) GetNetworkUri ¶
func (m *GceClusterConfig) GetNetworkUri() string
func (*GceClusterConfig) GetServiceAccountScopes ¶
func (m *GceClusterConfig) GetServiceAccountScopes() []string
func (*GceClusterConfig) GetSubnetworkUri ¶
func (m *GceClusterConfig) GetSubnetworkUri() string
func (*GceClusterConfig) GetTags ¶
func (m *GceClusterConfig) GetTags() []string
func (*GceClusterConfig) GetZoneUri ¶
func (m *GceClusterConfig) GetZoneUri() string
func (*GceClusterConfig) ProtoMessage ¶
func (*GceClusterConfig) ProtoMessage()
func (*GceClusterConfig) Reset ¶
func (m *GceClusterConfig) Reset()
func (*GceClusterConfig) String ¶
func (m *GceClusterConfig) String() string
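A hedged sketch of a subnetwork-based config follows; the URIs are placeholders, and note that NetworkUri and SubnetworkUri are mutually exclusive.

    gce := &dataproc.GceClusterConfig{
        ZoneUri:        "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b",       // placeholder
        SubnetworkUri:  "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-east1/sub0", // placeholder
        InternalIpOnly: true, // allowed only with subnetwork-enabled networks
        Tags:           []string{"dataproc"},
        Metadata:       map[string]string{"env": "test"},
    }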
type GetClusterRequest ¶
Request to get the resource representation for a cluster in a project.
type GetClusterRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
func (*GetClusterRequest) Descriptor ¶
func (*GetClusterRequest) Descriptor() ([]byte, []int)
func (*GetClusterRequest) GetClusterName ¶
func (m *GetClusterRequest) GetClusterName() string
func (*GetClusterRequest) GetProjectId ¶
func (m *GetClusterRequest) GetProjectId() string
func (*GetClusterRequest) GetRegion ¶
func (m *GetClusterRequest) GetRegion() string
func (*GetClusterRequest) ProtoMessage ¶
func (*GetClusterRequest) ProtoMessage()
func (*GetClusterRequest) Reset ¶
func (m *GetClusterRequest) Reset()
func (*GetClusterRequest) String ¶
func (m *GetClusterRequest) String() string
type GetJobRequest ¶
A request to get the resource representation for a job in a project.
type GetJobRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The job ID.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
func (*GetJobRequest) Descriptor ¶
func (*GetJobRequest) Descriptor() ([]byte, []int)
func (*GetJobRequest) GetJobId ¶
func (m *GetJobRequest) GetJobId() string
func (*GetJobRequest) GetProjectId ¶
func (m *GetJobRequest) GetProjectId() string
func (*GetJobRequest) GetRegion ¶
func (m *GetJobRequest) GetRegion() string
func (*GetJobRequest) ProtoMessage ¶
func (*GetJobRequest) ProtoMessage()
func (*GetJobRequest) Reset ¶
func (m *GetJobRequest) Reset()
func (*GetJobRequest) String ¶
func (m *GetJobRequest) String() string
type HadoopJob ¶
A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
type HadoopJob struct {
    // [Required] Indicates the location of the driver's main class. Specify
    // either the jar file that contains the main class or the main class name.
    // To specify both, add the jar file to `jar_file_uris`, and then specify
    // the main class name in this property.
    //
    // Types that are valid to be assigned to Driver:
    //     *HadoopJob_MainJarFileUri
    //     *HadoopJob_MainClass
    Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
    // [Optional] The arguments to pass to the driver. Do not
    // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
    // properties, since a collision may occur that causes an incorrect job
    // submission.
    Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
    // [Optional] Jar file URIs to add to the CLASSPATHs of the
    // Hadoop driver and tasks.
    JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
    // to the working directory of Hadoop drivers and distributed tasks. Useful
    // for naively parallel tasks.
    FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
    // [Optional] HCFS URIs of archives to be extracted in the working directory of
    // Hadoop drivers and tasks. Supported file types:
    // .jar, .tar, .tar.gz, .tgz, or .zip.
    ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
    // [Optional] A mapping of property names to values, used to configure Hadoop.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in /etc/hadoop/conf/*-site and
    // classes in user code.
    Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
func (*HadoopJob) Descriptor ¶
func (*HadoopJob) Descriptor() ([]byte, []int)
func (*HadoopJob) GetArchiveUris ¶
func (m *HadoopJob) GetArchiveUris() []string
func (*HadoopJob) GetArgs ¶
func (m *HadoopJob) GetArgs() []string
func (*HadoopJob) GetDriver ¶
func (m *HadoopJob) GetDriver() isHadoopJob_Driver
func (*HadoopJob) GetFileUris ¶
func (m *HadoopJob) GetFileUris() []string
func (*HadoopJob) GetJarFileUris ¶
func (m *HadoopJob) GetJarFileUris() []string
func (*HadoopJob) GetLoggingConfig ¶
func (m *HadoopJob) GetLoggingConfig() *LoggingConfig
func (*HadoopJob) GetMainClass ¶
func (m *HadoopJob) GetMainClass() string
func (*HadoopJob) GetMainJarFileUri ¶
func (m *HadoopJob) GetMainJarFileUri() string
func (*HadoopJob) GetProperties ¶
func (m *HadoopJob) GetProperties() map[string]string
func (*HadoopJob) ProtoMessage ¶
func (*HadoopJob) ProtoMessage()
func (*HadoopJob) Reset ¶
func (m *HadoopJob) Reset()
func (*HadoopJob) String ¶
func (m *HadoopJob) String() string
func (*HadoopJob) XXX_OneofFuncs ¶
func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type HadoopJob_MainClass ¶
type HadoopJob_MainClass struct {
    MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}
type HadoopJob_MainJarFileUri ¶
type HadoopJob_MainJarFileUri struct {
    MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}
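Because Driver is a oneof, it is populated by assigning one of the two wrapper types above; the class name and URIs in this sketch are placeholders.

    hadoop := &dataproc.HadoopJob{
        Driver: &dataproc.HadoopJob_MainClass{
            MainClass: "org.example.WordCount", // placeholder main class
        },
        JarFileUris: []string{"gs://my-bucket/wordcount.jar"},
        Args:        []string{"gs://my-bucket/input", "gs://my-bucket/output"},
    }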
type HiveJob ¶
A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.
type HiveJob struct {
    // [Required] The sequence of Hive queries to execute, specified as either
    // an HCFS file URI or a list of queries.
    //
    // Types that are valid to be assigned to Queries:
    //     *HiveJob_QueryFileUri
    //     *HiveJob_QueryList
    Queries isHiveJob_Queries `protobuf_oneof:"queries"`
    // [Optional] Whether to continue executing queries if a query fails.
    // The default value is `false`. Setting to `true` can be useful when executing
    // independent parallel queries.
    ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
    // [Optional] Mapping of query variable names to values (equivalent to the
    // Hive command: `SET name="value";`).
    ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] A mapping of property names and values, used to configure Hive.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
    // /etc/hive/conf/hive-site.xml, and classes in user code.
    Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] HCFS URIs of jar files to add to the CLASSPATH of the
    // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
    // and UDFs.
    JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
}
func (*HiveJob) Descriptor ¶
func (*HiveJob) Descriptor() ([]byte, []int)
func (*HiveJob) GetContinueOnFailure ¶
func (m *HiveJob) GetContinueOnFailure() bool
func (*HiveJob) GetJarFileUris ¶
func (m *HiveJob) GetJarFileUris() []string
func (*HiveJob) GetProperties ¶
func (m *HiveJob) GetProperties() map[string]string
func (*HiveJob) GetQueries ¶
func (m *HiveJob) GetQueries() isHiveJob_Queries
func (*HiveJob) GetQueryFileUri ¶
func (m *HiveJob) GetQueryFileUri() string
func (*HiveJob) GetQueryList ¶
func (m *HiveJob) GetQueryList() *QueryList
func (*HiveJob) GetScriptVariables ¶
func (m *HiveJob) GetScriptVariables() map[string]string
func (*HiveJob) ProtoMessage ¶
func (*HiveJob) ProtoMessage()
func (*HiveJob) Reset ¶
func (m *HiveJob) Reset()
func (*HiveJob) String ¶
func (m *HiveJob) String() string
func (*HiveJob) XXX_OneofFuncs ¶
func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type HiveJob_QueryFileUri ¶
type HiveJob_QueryFileUri struct {
    QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type HiveJob_QueryList ¶
type HiveJob_QueryList struct {
    QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
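The Queries oneof works the same way; in this sketch it is set from an HCFS file URI (a placeholder), with a script variable that Hive sees as SET env="test";.

    hive := &dataproc.HiveJob{
        Queries: &dataproc.HiveJob_QueryFileUri{
            QueryFileUri: "gs://my-bucket/queries.hql", // placeholder
        },
        ScriptVariables:   map[string]string{"env": "test"},
        ContinueOnFailure: false,
    }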
type InstanceGroupConfig ¶
[Optional] The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.
type InstanceGroupConfig struct {
    // [Required] The number of VM instances in the instance group.
    // For master instance groups, must be set to 1.
    NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"`
    // [Optional] The list of instance names. Cloud Dataproc derives the names from
    // `cluster_name`, `num_instances`, and the instance group if not set by user
    // (recommended practice is to let Cloud Dataproc derive the name).
    InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"`
    // [Output-only] The Google Compute Engine image resource used for cluster
    // instances. Inferred from `SoftwareConfig.image_version`.
    ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"`
    // [Required] The Google Compute Engine machine type used for cluster instances.
    // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`.
    MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"`
    // [Optional] Disk option config settings.
    DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"`
    // [Optional] Specifies that this instance group contains preemptible instances.
    IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"`
    // [Output-only] The config for Google Compute Engine Instance Group
    // Manager that manages this group.
    // This is only used for preemptible instance groups.
    ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"`
}
func (*InstanceGroupConfig) Descriptor ¶
func (*InstanceGroupConfig) Descriptor() ([]byte, []int)
func (*InstanceGroupConfig) GetDiskConfig ¶
func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig
func (*InstanceGroupConfig) GetImageUri ¶
func (m *InstanceGroupConfig) GetImageUri() string
func (*InstanceGroupConfig) GetInstanceNames ¶
func (m *InstanceGroupConfig) GetInstanceNames() []string
func (*InstanceGroupConfig) GetIsPreemptible ¶
func (m *InstanceGroupConfig) GetIsPreemptible() bool
func (*InstanceGroupConfig) GetMachineTypeUri ¶
func (m *InstanceGroupConfig) GetMachineTypeUri() string
func (*InstanceGroupConfig) GetManagedGroupConfig ¶
func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig
func (*InstanceGroupConfig) GetNumInstances ¶
func (m *InstanceGroupConfig) GetNumInstances() int32
func (*InstanceGroupConfig) ProtoMessage ¶
func (*InstanceGroupConfig) ProtoMessage()
func (*InstanceGroupConfig) Reset ¶
func (m *InstanceGroupConfig) Reset()
func (*InstanceGroupConfig) String ¶
func (m *InstanceGroupConfig) String() string
type Job ¶
A Cloud Dataproc job resource.
type Job struct {
    // [Optional] The fully qualified reference to the job, which can be used to
    // obtain the equivalent REST path of the job resource. If this property
    // is not specified when a job is created, the server generates a
    // <code>job_id</code>.
    Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"`
    // [Required] Job information, including how, when, and where to
    // run the job.
    Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"`
    // [Required] The application/framework-specific portion of the job.
    //
    // Types that are valid to be assigned to TypeJob:
    //     *Job_HadoopJob
    //     *Job_SparkJob
    //     *Job_PysparkJob
    //     *Job_HiveJob
    //     *Job_PigJob
    //     *Job_SparkSqlJob
    TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
    // [Output-only] The job status. Additional application-specific
    // status information may be contained in the <code>type_job</code>
    // and <code>yarn_applications</code> fields.
    Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"`
    // [Output-only] The previous job status.
    StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
    // [Output-only] A URI pointing to the location of the stdout of the job's
    // driver program.
    DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri" json:"driver_output_resource_uri,omitempty"`
    // [Output-only] If present, the location of miscellaneous control files
    // which may be used as part of job setup and handling. If not present,
    // control files may be placed in the same location as `driver_output_uri`.
    DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"`
}
func (*Job) Descriptor ¶
func (*Job) Descriptor() ([]byte, []int)
func (*Job) GetDriverControlFilesUri ¶
func (m *Job) GetDriverControlFilesUri() string
func (*Job) GetDriverOutputResourceUri ¶
func (m *Job) GetDriverOutputResourceUri() string
func (*Job) GetHadoopJob ¶
func (m *Job) GetHadoopJob() *HadoopJob
func (*Job) GetHiveJob ¶
func (m *Job) GetHiveJob() *HiveJob
func (*Job) GetPigJob ¶
func (m *Job) GetPigJob() *PigJob
func (*Job) GetPlacement ¶
func (m *Job) GetPlacement() *JobPlacement
func (*Job) GetPysparkJob ¶
func (m *Job) GetPysparkJob() *PySparkJob
func (*Job) GetReference ¶
func (m *Job) GetReference() *JobReference
func (*Job) GetSparkJob ¶
func (m *Job) GetSparkJob() *SparkJob
func (*Job) GetSparkSqlJob ¶
func (m *Job) GetSparkSqlJob() *SparkSqlJob
func (*Job) GetStatus ¶
func (m *Job) GetStatus() *JobStatus
func (*Job) GetStatusHistory ¶
func (m *Job) GetStatusHistory() []*JobStatus
func (*Job) GetTypeJob ¶
func (m *Job) GetTypeJob() isJob_TypeJob
func (*Job) ProtoMessage ¶
func (*Job) ProtoMessage()
func (*Job) Reset ¶
func (m *Job) Reset()
func (*Job) String ¶
func (m *Job) String() string
func (*Job) XXX_OneofFuncs ¶
func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type JobControllerClient ¶
type JobControllerClient interface {
    // Submits a job to a cluster.
    SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Gets the resource representation for a job in a project.
    GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Lists regions/{region}/jobs in a project.
    ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
    // Starts a job cancellation request. To access the job resource
    // after cancellation, call
    // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or
    // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).
    CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Deletes the job from the project. If the job is active, the delete fails,
    // and the response returns `FAILED_PRECONDITION`.
    DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
}
func NewJobControllerClient ¶
func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient
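A sketch of submitting the HadoopJob built earlier. SubmitJobRequest is not reproduced in this listing, so its ProjectId/Region/Job field names here follow the pattern of the other request messages and should be checked against the source.

    jobClient := dataproc.NewJobControllerClient(conn) // conn as dialed for the cluster client
    job, err := jobClient.SubmitJob(ctx, &dataproc.SubmitJobRequest{
        ProjectId: "my-project", // placeholder
        Region:    "us-central1",
        Job: &dataproc.Job{
            Placement: &dataproc.JobPlacement{ClusterName: "example-cluster"},
            TypeJob:   &dataproc.Job_HadoopJob{HadoopJob: hadoop}, // hadoop as above
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(job.GetReference().GetJobId()) // server-generated if not supplied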
type JobControllerServer ¶
type JobControllerServer interface {
    // Submits a job to a cluster.
    SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
    // Gets the resource representation for a job in a project.
    GetJob(context.Context, *GetJobRequest) (*Job, error)
    // Lists regions/{region}/jobs in a project.
    ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
    // Starts a job cancellation request. To access the job resource
    // after cancellation, call
    // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or
    // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).
    CancelJob(context.Context, *CancelJobRequest) (*Job, error)
    // Deletes the job from the project. If the job is active, the delete fails,
    // and the response returns `FAILED_PRECONDITION`.
    DeleteJob(context.Context, *DeleteJobRequest) (*google_protobuf2.Empty, error)
}
type JobPlacement ¶
Cloud Dataproc job config.
type JobPlacement struct {
    // [Required] The name of the cluster where the job will be submitted.
    ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // [Output-only] A cluster UUID generated by the Cloud Dataproc service when
    // the job is submitted.
    ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
}
func (*JobPlacement) Descriptor ¶
func (*JobPlacement) Descriptor() ([]byte, []int)
func (*JobPlacement) GetClusterName ¶
func (m *JobPlacement) GetClusterName() string
func (*JobPlacement) GetClusterUuid ¶
func (m *JobPlacement) GetClusterUuid() string
func (*JobPlacement) ProtoMessage ¶
func (*JobPlacement) ProtoMessage()
func (*JobPlacement) Reset ¶
func (m *JobPlacement) Reset()
func (*JobPlacement) String ¶
func (m *JobPlacement) String() string
type JobReference ¶
Encapsulates the full scoping used to reference a job.
type JobReference struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Optional] The job ID, which must be unique within the project. The job ID
    // is generated by the server upon job submission or provided by the user as a
    // means to perform retries without creating duplicate jobs. The ID must
    // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
    // hyphens (-). The maximum length is 512 characters.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
func (*JobReference) Descriptor ¶
func (*JobReference) Descriptor() ([]byte, []int)
func (*JobReference) GetJobId ¶
func (m *JobReference) GetJobId() string
func (*JobReference) GetProjectId ¶
func (m *JobReference) GetProjectId() string
func (*JobReference) ProtoMessage ¶
func (*JobReference) ProtoMessage()
func (*JobReference) Reset ¶
func (m *JobReference) Reset()
func (*JobReference) String ¶
func (m *JobReference) String() string
type JobStatus ¶
Cloud Dataproc job status.
type JobStatus struct {
    // [Output-only] A state message specifying the overall job state.
    State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"`
    // [Output-only] Optional job state details, such as an error
    // description if the state is <code>ERROR</code>.
    Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"`
    // [Output-only] The time when this state was entered.
    StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
}
func (*JobStatus) Descriptor ¶
func (*JobStatus) Descriptor() ([]byte, []int)
func (*JobStatus) GetDetails ¶
func (m *JobStatus) GetDetails() string
func (*JobStatus) GetState ¶
func (m *JobStatus) GetState() JobStatus_State
func (*JobStatus) GetStateStartTime ¶
func (m *JobStatus) GetStateStartTime() *google_protobuf3.Timestamp
func (*JobStatus) ProtoMessage ¶
func (*JobStatus) ProtoMessage()
func (*JobStatus) Reset ¶
func (m *JobStatus) Reset()
func (*JobStatus) String ¶
func (m *JobStatus) String() string
type JobStatus_State ¶
The job state.
type JobStatus_State int32
const (
    // The job state is unknown.
    JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
    // The job is pending; it has been submitted, but is not yet running.
    JobStatus_PENDING JobStatus_State = 1
    // Job has been received by the service and completed initial setup;
    // it will soon be submitted to the cluster.
    JobStatus_SETUP_DONE JobStatus_State = 8
    // The job is running on the cluster.
    JobStatus_RUNNING JobStatus_State = 2
    // A CancelJob request has been received, but is pending.
    JobStatus_CANCEL_PENDING JobStatus_State = 3
    // Transient in-flight resources have been canceled, and the request to
    // cancel the running job has been issued to the cluster.
    JobStatus_CANCEL_STARTED JobStatus_State = 7
    // The job cancellation was successful.
    JobStatus_CANCELLED JobStatus_State = 4
    // The job has completed successfully.
    JobStatus_DONE JobStatus_State = 5
    // The job has completed, but encountered an error.
    JobStatus_ERROR JobStatus_State = 6
)
func (JobStatus_State) EnumDescriptor ¶
func (JobStatus_State) EnumDescriptor() ([]byte, []int)
func (JobStatus_State) String ¶
func (x JobStatus_State) String() string
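A simple polling loop that treats CANCELLED, DONE, and ERROR as terminal, per the states above; the interval and deadline handling are elided, and jobClient/job come from the SubmitJob sketch.

    terminal := false
    for !terminal {
        j, err := jobClient.GetJob(ctx, &dataproc.GetJobRequest{
            ProjectId: "my-project", // placeholder
            Region:    "us-central1",
            JobId:     job.GetReference().GetJobId(),
        })
        if err != nil {
            log.Fatal(err)
        }
        switch j.GetStatus().GetState() {
        case dataproc.JobStatus_CANCELLED, dataproc.JobStatus_DONE, dataproc.JobStatus_ERROR:
            fmt.Println("terminal state:", j.GetStatus().GetState())
            terminal = true
        default:
            time.Sleep(5 * time.Second)
        }
    }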
type Job_HadoopJob ¶
type Job_HadoopJob struct {
    HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,oneof"`
}
type Job_HiveJob ¶
type Job_HiveJob struct {
    HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,oneof"`
}
type Job_PigJob ¶
type Job_PigJob struct {
    PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,oneof"`
}
type Job_PysparkJob ¶
type Job_PysparkJob struct {
    PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,oneof"`
}
type Job_SparkJob ¶
type Job_SparkJob struct {
    SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,oneof"`
}
type Job_SparkSqlJob ¶
type Job_SparkSqlJob struct {
    SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,oneof"`
}
type ListClustersRequest ¶
A request to list the clusters in a project.
type ListClustersRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"`
    // [Optional] The standard List page size.
    PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
    // [Optional] The standard List page token.
    PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
func (*ListClustersRequest) Descriptor ¶
func (*ListClustersRequest) Descriptor() ([]byte, []int)
func (*ListClustersRequest) GetPageSize ¶
func (m *ListClustersRequest) GetPageSize() int32
func (*ListClustersRequest) GetPageToken ¶
func (m *ListClustersRequest) GetPageToken() string
func (*ListClustersRequest) GetProjectId ¶
func (m *ListClustersRequest) GetProjectId() string
func (*ListClustersRequest) GetRegion ¶
func (m *ListClustersRequest) GetRegion() string
func (*ListClustersRequest) ProtoMessage ¶
func (*ListClustersRequest) ProtoMessage()
func (*ListClustersRequest) Reset ¶
func (m *ListClustersRequest) Reset()
func (*ListClustersRequest) String ¶
func (m *ListClustersRequest) String() string
type ListClustersResponse ¶
The list of all clusters in a project.
type ListClustersResponse struct {
    // [Output-only] The clusters in the project.
    Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
    // [Output-only] This token is included in the response if there are more
    // results to fetch. To fetch additional results, provide this value as the
    // `page_token` in a subsequent <code>ListClustersRequest</code>.
    NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (*ListClustersResponse) Descriptor ¶
func (*ListClustersResponse) Descriptor() ([]byte, []int)
func (*ListClustersResponse) GetClusters ¶
func (m *ListClustersResponse) GetClusters() []*Cluster
func (*ListClustersResponse) GetNextPageToken ¶
func (m *ListClustersResponse) GetNextPageToken() string
func (*ListClustersResponse) ProtoMessage ¶
func (*ListClustersResponse) ProtoMessage()
func (*ListClustersResponse) Reset ¶
func (m *ListClustersResponse) Reset()
func (*ListClustersResponse) String ¶
func (m *ListClustersResponse) String() string
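NextPageToken drives the usual paging loop, sketched here with client as the ClusterControllerClient from earlier: keep feeding the returned token back as PageToken until it comes back empty.

    req := &dataproc.ListClustersRequest{
        ProjectId: "my-project", // placeholder
        Region:    "us-central1",
    }
    for {
        resp, err := client.ListClusters(ctx, req)
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range resp.GetClusters() {
            fmt.Println(c.GetClusterName(), c.GetStatus().GetState())
        }
        if resp.GetNextPageToken() == "" {
            break // no more results
        }
        req.PageToken = resp.GetNextPageToken()
    }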
type ListJobsRequest ¶
A request to list jobs in a project.
type ListJobsRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"`
    // [Optional] The number of results to return in each response.
    PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
    // [Optional] The page token, returned by a previous call, to request the
    // next page of results.
    PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
    // [Optional] If set, the returned jobs list includes only jobs that were
    // submitted to the named cluster.
    ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // [Optional] Specifies enumerated categories of jobs to list
    // (default = match ALL jobs).
    JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,enum=google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"`
}
func (*ListJobsRequest) Descriptor ¶
func (*ListJobsRequest) Descriptor() ([]byte, []int)
func (*ListJobsRequest) GetClusterName ¶
func (m *ListJobsRequest) GetClusterName() string
func (*ListJobsRequest) GetJobStateMatcher ¶
func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher
func (*ListJobsRequest) GetPageSize ¶
func (m *ListJobsRequest) GetPageSize() int32
func (*ListJobsRequest) GetPageToken ¶
func (m *ListJobsRequest) GetPageToken() string
func (*ListJobsRequest) GetProjectId ¶
func (m *ListJobsRequest) GetProjectId() string
func (*ListJobsRequest) GetRegion ¶
func (m *ListJobsRequest) GetRegion() string
func (*ListJobsRequest) ProtoMessage ¶
func (*ListJobsRequest) ProtoMessage()
func (*ListJobsRequest) Reset ¶
func (m *ListJobsRequest) Reset()
func (*ListJobsRequest) String ¶
func (m *ListJobsRequest) String() string
type ListJobsRequest_JobStateMatcher ¶
A matcher that specifies categories of job states.
type ListJobsRequest_JobStateMatcher int32
const (
    // Match all jobs, regardless of state.
    ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
    // Only match jobs in non-terminal states: PENDING, RUNNING, or
    // CANCEL_PENDING.
    ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
    // Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
    ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)
func (ListJobsRequest_JobStateMatcher) EnumDescriptor ¶
func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)
func (ListJobsRequest_JobStateMatcher) String ¶
func (x ListJobsRequest_JobStateMatcher) String() string
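For example, restricting a listing to non-terminal jobs is a matter of setting the matcher on the request before passing it to ListJobs (project and region are placeholders):

    req := &dataproc.ListJobsRequest{
        ProjectId:       "my-project", // placeholder
        Region:          "us-central1",
        JobStateMatcher: dataproc.ListJobsRequest_ACTIVE, // PENDING, RUNNING, or CANCEL_PENDING only
    }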
type ListJobsResponse ¶
A list of jobs in a project.
type ListJobsResponse struct {
    // [Output-only] Jobs list.
    Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"`
    // [Optional] This token is included in the response if there are more results
    // to fetch. To fetch additional results, provide this value as the
    // `page_token` in a subsequent `ListJobsRequest`.
    NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (*ListJobsResponse) Descriptor ¶
func (*ListJobsResponse) Descriptor() ([]byte, []int)
func (*ListJobsResponse) GetJobs ¶
func (m *ListJobsResponse) GetJobs() []*Job
func (*ListJobsResponse) GetNextPageToken ¶
func (m *ListJobsResponse) GetNextPageToken() string
func (*ListJobsResponse) ProtoMessage ¶
func (*ListJobsResponse) ProtoMessage()
func (*ListJobsResponse) Reset ¶
func (m *ListJobsResponse) Reset()
func (*ListJobsResponse) String ¶
func (m *ListJobsResponse) String() string
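Job listings use the same token-based pagination, and job_state_matcher narrows results to active or terminal jobs. A hedged fragment, assuming the generated JobControllerClient exists elsewhere in this package (project and cluster names hypothetical):

req := &ListJobsRequest{
    ProjectId:       "my-project",
    Region:          "global",
    ClusterName:     "my-cluster",           // optional: restrict to one cluster
    JobStateMatcher: ListJobsRequest_ACTIVE, // PENDING, RUNNING, or CANCEL_PENDING only
    PageSize:        100,
}
resp, err := NewJobControllerClient(conn).ListJobs(ctx, req)
if err == nil {
    for _, j := range resp.GetJobs() {
        _ = j // each active job; follow resp.GetNextPageToken() for further pages
    }
}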
type LoggingConfig ¶
The runtime logging config of the job.
type LoggingConfig struct {
    // The per-package log levels for the driver. This may include the
    // "root" package name to configure rootLogger.
    // Examples:
    //   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=google.cloud.dataproc.v1.LoggingConfig_Level"`
}
func (*LoggingConfig) Descriptor ¶
func (*LoggingConfig) Descriptor() ([]byte, []int)
func (*LoggingConfig) GetDriverLogLevels ¶
func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level
func (*LoggingConfig) ProtoMessage ¶
func (*LoggingConfig) ProtoMessage()
func (*LoggingConfig) Reset ¶
func (m *LoggingConfig) Reset()
func (*LoggingConfig) String ¶
func (m *LoggingConfig) String() string
type LoggingConfig_Level ¶
The Log4j level for job execution. When running an [Apache Hive](http://hive.apache.org/) job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.
type LoggingConfig_Level int32
const (
    // Level is unspecified. Use default level for log4j.
    LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
    // Use ALL level for log4j.
    LoggingConfig_ALL LoggingConfig_Level = 1
    // Use TRACE level for log4j.
    LoggingConfig_TRACE LoggingConfig_Level = 2
    // Use DEBUG level for log4j.
    LoggingConfig_DEBUG LoggingConfig_Level = 3
    // Use INFO level for log4j.
    LoggingConfig_INFO LoggingConfig_Level = 4
    // Use WARN level for log4j.
    LoggingConfig_WARN LoggingConfig_Level = 5
    // Use ERROR level for log4j.
    LoggingConfig_ERROR LoggingConfig_Level = 6
    // Use FATAL level for log4j.
    LoggingConfig_FATAL LoggingConfig_Level = 7
    // Turn off log4j.
    LoggingConfig_OFF LoggingConfig_Level = 8
)
func (LoggingConfig_Level) EnumDescriptor ¶
func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)
func (LoggingConfig_Level) String ¶
func (x LoggingConfig_Level) String() string
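Map keys in DriverLogLevels are Log4j package names (or "root"); values are the levels above. An illustrative construction:

cfg := &LoggingConfig{
    DriverLogLevels: map[string]LoggingConfig_Level{
        "root":       LoggingConfig_INFO,  // rootLogger verbosity
        "org.apache": LoggingConfig_DEBUG, // per-package override
        "com.google": LoggingConfig_FATAL,
    },
}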
type ManagedGroupConfig ¶
Specifies the resources used to actively manage an instance group.
type ManagedGroupConfig struct {
    // [Output-only] The name of the Instance Template used for the Managed
    // Instance Group.
    InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"`
    // [Output-only] The name of the Instance Group Manager for this group.
    InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName" json:"instance_group_manager_name,omitempty"`
}
func (*ManagedGroupConfig) Descriptor ¶
func (*ManagedGroupConfig) Descriptor() ([]byte, []int)
func (*ManagedGroupConfig) GetInstanceGroupManagerName ¶
func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string
func (*ManagedGroupConfig) GetInstanceTemplateName ¶
func (m *ManagedGroupConfig) GetInstanceTemplateName() string
func (*ManagedGroupConfig) ProtoMessage ¶
func (*ManagedGroupConfig) ProtoMessage()
func (*ManagedGroupConfig) Reset ¶
func (m *ManagedGroupConfig) Reset()
func (*ManagedGroupConfig) String ¶
func (m *ManagedGroupConfig) String() string
type NodeInitializationAction ¶
Specifies an executable to run on a fully configured node and a timeout period for executable completion.
type NodeInitializationAction struct {
    // [Required] Google Cloud Storage URI of the executable file.
    ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"`
    // [Optional] Amount of time the executable has to complete. Default is
    // 10 minutes. Cluster creation fails with an explanatory error message (the
    // name of the executable that caused the error and the exceeded timeout
    // period) if the executable has not completed by the end of the timeout period.
    ExecutionTimeout *google_protobuf4.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout" json:"execution_timeout,omitempty"`
}
func (*NodeInitializationAction) Descriptor ¶
func (*NodeInitializationAction) Descriptor() ([]byte, []int)
func (*NodeInitializationAction) GetExecutableFile ¶
func (m *NodeInitializationAction) GetExecutableFile() string
func (*NodeInitializationAction) GetExecutionTimeout ¶
func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf4.Duration
func (*NodeInitializationAction) ProtoMessage ¶
func (*NodeInitializationAction) ProtoMessage()
func (*NodeInitializationAction) Reset ¶
func (m *NodeInitializationAction) Reset()
func (*NodeInitializationAction) String ¶
func (m *NodeInitializationAction) String() string
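ExecutionTimeout is the protobuf Duration well-known type, which this package aliases as google_protobuf4. A sketch with a hypothetical script URI, spelling out the documented 10-minute default:

action := &NodeInitializationAction{
    ExecutableFile:   "gs://my-bucket/bootstrap.sh",             // hypothetical Cloud Storage URI
    ExecutionTimeout: &google_protobuf4.Duration{Seconds: 600},  // 10 minutes
}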
type PigJob ¶
A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.
type PigJob struct {
    // [Required] The sequence of Pig queries to execute, specified as an HCFS
    // file URI or a list of queries.
    //
    // Types that are valid to be assigned to Queries:
    //   *PigJob_QueryFileUri
    //   *PigJob_QueryList
    Queries isPigJob_Queries `protobuf_oneof:"queries"`
    // [Optional] Whether to continue executing queries if a query fails.
    // The default value is `false`. Setting to `true` can be useful when executing
    // independent parallel queries.
    ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
    // [Optional] Mapping of query variable names to values (equivalent to the Pig
    // command: `name=[value]`).
    ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] A mapping of property names to values, used to configure Pig.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
    // /etc/pig/conf/pig.properties, and classes in user code.
    Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] HCFS URIs of jar files to add to the CLASSPATH of
    // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // [Optional] The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
func (*PigJob) Descriptor ¶
func (*PigJob) Descriptor() ([]byte, []int)
func (*PigJob) GetContinueOnFailure ¶
func (m *PigJob) GetContinueOnFailure() bool
func (*PigJob) GetJarFileUris ¶
func (m *PigJob) GetJarFileUris() []string
func (*PigJob) GetLoggingConfig ¶
func (m *PigJob) GetLoggingConfig() *LoggingConfig
func (*PigJob) GetProperties ¶
func (m *PigJob) GetProperties() map[string]string
func (*PigJob) GetQueries ¶
func (m *PigJob) GetQueries() isPigJob_Queries
func (*PigJob) GetQueryFileUri ¶
func (m *PigJob) GetQueryFileUri() string
func (*PigJob) GetQueryList ¶
func (m *PigJob) GetQueryList() *QueryList
func (*PigJob) GetScriptVariables ¶
func (m *PigJob) GetScriptVariables() map[string]string
func (*PigJob) ProtoMessage ¶
func (*PigJob) ProtoMessage()
func (*PigJob) Reset ¶
func (m *PigJob) Reset()
func (*PigJob) String ¶
func (m *PigJob) String() string
func (*PigJob) XXX_OneofFuncs ¶
func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type PigJob_QueryFileUri ¶
type PigJob_QueryFileUri struct {
    QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type PigJob_QueryList ¶
type PigJob_QueryList struct {
    QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
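Queries is a protobuf oneof: populate it with exactly one of the two wrapper types above. A sketch with hypothetical URIs and variables:

job := &PigJob{
    Queries:           &PigJob_QueryFileUri{QueryFileUri: "gs://my-bucket/script.pig"},
    ScriptVariables:   map[string]string{"input": "gs://my-bucket/data"}, // Pig: name=[value]
    ContinueOnFailure: true, // keep going if an independent query fails
}
// Inline queries instead of a script file:
// job.Queries = &PigJob_QueryList{QueryList: &QueryList{Queries: []string{"fs -ls /"}}}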
type PySparkJob ¶
A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.
type PySparkJob struct {
    // [Required] The HCFS URI of the main Python file to use as the driver. Must
    // be a .py file.
    MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"`
    // [Optional] The arguments to pass to the driver. Do not include arguments,
    // such as `--conf`, that can be set as job properties, since a collision may
    // occur that causes an incorrect job submission.
    Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"`
    // [Optional] HCFS file URIs of Python files to pass to the PySpark
    // framework. Supported file types: .py, .egg, and .zip.
    PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"`
    // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the
    // Python driver and tasks.
    JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // [Optional] HCFS URIs of files to be copied to the working directory of
    // Python drivers and distributed tasks. Useful for naively parallel tasks.
    FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
    // [Optional] HCFS URIs of archives to be extracted in the working directory
    // of Python drivers and tasks. Supported file types:
    // .jar, .tar, .tar.gz, .tgz, and .zip.
    ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
    // [Optional] A mapping of property names to values, used to configure PySpark.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in
    // /etc/spark/conf/spark-defaults.conf and classes in user code.
    Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
func (*PySparkJob) Descriptor ¶
func (*PySparkJob) Descriptor() ([]byte, []int)
func (*PySparkJob) GetArchiveUris ¶
func (m *PySparkJob) GetArchiveUris() []string
func (*PySparkJob) GetArgs ¶
func (m *PySparkJob) GetArgs() []string
func (*PySparkJob) GetFileUris ¶
func (m *PySparkJob) GetFileUris() []string
func (*PySparkJob) GetJarFileUris ¶
func (m *PySparkJob) GetJarFileUris() []string
func (*PySparkJob) GetLoggingConfig ¶
func (m *PySparkJob) GetLoggingConfig() *LoggingConfig
func (*PySparkJob) GetMainPythonFileUri ¶
func (m *PySparkJob) GetMainPythonFileUri() string
func (*PySparkJob) GetProperties ¶
func (m *PySparkJob) GetProperties() map[string]string
func (*PySparkJob) GetPythonFileUris ¶
func (m *PySparkJob) GetPythonFileUris() []string
func (*PySparkJob) ProtoMessage ¶
func (*PySparkJob) ProtoMessage()
func (*PySparkJob) Reset ¶
func (m *PySparkJob) Reset()
func (*PySparkJob) String ¶
func (m *PySparkJob) String() string
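A sketch of a PySpark payload (URIs hypothetical); `spark.executor.memory` is an ordinary Spark property passed through Properties:

job := &PySparkJob{
    MainPythonFileUri: "gs://my-bucket/wordcount.py",
    Args:              []string{"gs://my-bucket/input/"},      // positional driver args
    PythonFileUris:    []string{"gs://my-bucket/helpers.zip"}, // extra modules for the driver
    Properties:        map[string]string{"spark.executor.memory": "2g"},
}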
type QueryList ¶
A list of queries to run on a cluster.
type QueryList struct {
    // [Required] The queries to execute. You do not need to terminate a query
    // with a semicolon. Multiple queries can be specified in one string
    // by separating each with a semicolon. Here is an example of a Cloud
    // Dataproc API snippet that uses a QueryList to specify a HiveJob:
    //
    //   "hiveJob": {
    //     "queryList": {
    //       "queries": [
    //         "query1",
    //         "query2",
    //         "query3;query4"
    //       ]
    //     }
    //   }
    Queries []string `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
}
func (*QueryList) Descriptor ¶
func (*QueryList) Descriptor() ([]byte, []int)
func (*QueryList) GetQueries ¶
func (m *QueryList) GetQueries() []string
func (*QueryList) ProtoMessage ¶
func (*QueryList) ProtoMessage()
func (*QueryList) Reset ¶
func (m *QueryList) Reset()
func (*QueryList) String ¶
func (m *QueryList) String() string
type SoftwareConfig ¶
Specifies the selection and config of software inside the cluster.
type SoftwareConfig struct {
    // [Optional] The version of software inside the cluster. It must match the
    // regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the
    // latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
    ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"`
    // [Optional] The properties to set on daemon config files.
    //
    // Property keys are specified in `prefix:property` format, such as
    // `core:fs.defaultFS`. The following are supported prefixes
    // and their mappings:
    //
    //   * core:   `core-site.xml`
    //   * hdfs:   `hdfs-site.xml`
    //   * mapred: `mapred-site.xml`
    //   * yarn:   `yarn-site.xml`
    //   * hive:   `hive-site.xml`
    //   * pig:    `pig.properties`
    //   * spark:  `spark-defaults.conf`
    Properties map[string]string `protobuf:"bytes,2,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (*SoftwareConfig) Descriptor ¶
func (*SoftwareConfig) Descriptor() ([]byte, []int)
func (*SoftwareConfig) GetImageVersion ¶
func (m *SoftwareConfig) GetImageVersion() string
func (*SoftwareConfig) GetProperties ¶
func (m *SoftwareConfig) GetProperties() map[string]string
func (*SoftwareConfig) ProtoMessage ¶
func (*SoftwareConfig) ProtoMessage()
func (*SoftwareConfig) Reset ¶
func (m *SoftwareConfig) Reset()
func (*SoftwareConfig) String ¶
func (m *SoftwareConfig) String() string
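Property keys carry the config-file prefix described above. An illustrative construction (values hypothetical):

cfg := &SoftwareConfig{
    ImageVersion: "1.1", // must match [0-9]+\.[0-9]+
    Properties: map[string]string{
        "hdfs:dfs.replication":       "3", // hdfs-site.xml
        "spark:spark.executor.cores": "2", // spark-defaults.conf
    },
}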
type SparkJob ¶
A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.
type SparkJob struct {
    // [Required] The specification of the main method to call to drive the job.
    // Specify either the jar file that contains the main class or the main class
    // name. To pass both a main jar and a main class in that jar, add the jar to
    // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
    //
    // Types that are valid to be assigned to Driver:
    //   *SparkJob_MainJarFileUri
    //   *SparkJob_MainClass
    Driver isSparkJob_Driver `protobuf_oneof:"driver"`
    // [Optional] The arguments to pass to the driver. Do not include arguments,
    // such as `--conf`, that can be set as job properties, since a collision may
    // occur that causes an incorrect job submission.
    Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
    // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the
    // Spark driver and tasks.
    JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // [Optional] HCFS URIs of files to be copied to the working directory of
    // Spark drivers and distributed tasks. Useful for naively parallel tasks.
    FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
    // [Optional] HCFS URIs of archives to be extracted in the working directory
    // of Spark drivers and tasks. Supported file types:
    // .jar, .tar, .tar.gz, .tgz, and .zip.
    ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
    // [Optional] A mapping of property names to values, used to configure Spark.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in
    // /etc/spark/conf/spark-defaults.conf and classes in user code.
    Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
func (*SparkJob) Descriptor ¶
func (*SparkJob) Descriptor() ([]byte, []int)
func (*SparkJob) GetArchiveUris ¶
func (m *SparkJob) GetArchiveUris() []string
func (*SparkJob) GetArgs ¶
func (m *SparkJob) GetArgs() []string
func (*SparkJob) GetDriver ¶
func (m *SparkJob) GetDriver() isSparkJob_Driver
func (*SparkJob) GetFileUris ¶
func (m *SparkJob) GetFileUris() []string
func (*SparkJob) GetJarFileUris ¶
func (m *SparkJob) GetJarFileUris() []string
func (*SparkJob) GetLoggingConfig ¶
func (m *SparkJob) GetLoggingConfig() *LoggingConfig
func (*SparkJob) GetMainClass ¶
func (m *SparkJob) GetMainClass() string
func (*SparkJob) GetMainJarFileUri ¶
func (m *SparkJob) GetMainJarFileUri() string
func (*SparkJob) GetProperties ¶
func (m *SparkJob) GetProperties() map[string]string
func (*SparkJob) ProtoMessage ¶
func (*SparkJob) ProtoMessage()
func (*SparkJob) Reset ¶
func (m *SparkJob) Reset()
func (*SparkJob) String ¶
func (m *SparkJob) String() string
func (*SparkJob) XXX_OneofFuncs ¶
func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type SparkJob_MainClass ¶
type SparkJob_MainClass struct {
    MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}
type SparkJob_MainJarFileUri ¶
type SparkJob_MainJarFileUri struct {
    MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}
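Driver is a oneof: set either the main jar URI or a main class whose jar appears in JarFileUris. A sketch (URIs and class name hypothetical):

job := &SparkJob{
    Driver: &SparkJob_MainJarFileUri{MainJarFileUri: "gs://my-bucket/app.jar"},
    Args:   []string{"--input", "gs://my-bucket/data"},
}
// Equivalent by class name, with the jar on the classpath instead:
// job.Driver = &SparkJob_MainClass{MainClass: "com.example.Main"}
// job.JarFileUris = []string{"gs://my-bucket/app.jar"}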
type SparkSqlJob ¶
A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.
type SparkSqlJob struct {
    // [Required] The sequence of Spark SQL queries to execute, specified as
    // either an HCFS file URI or as a list of queries.
    //
    // Types that are valid to be assigned to Queries:
    //   *SparkSqlJob_QueryFileUri
    //   *SparkSqlJob_QueryList
    Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
    // [Optional] Mapping of query variable names to values (equivalent to the
    // Spark SQL command: SET `name="value";`).
    ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] A mapping of property names to values, used to configure
    // Spark SQL's SparkConf. Properties that conflict with values set by the
    // Cloud Dataproc API may be overwritten.
    Properties map[string]string `protobuf:"bytes,4,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    // [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH.
    JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // [Optional] The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
func (*SparkSqlJob) Descriptor ¶
func (*SparkSqlJob) Descriptor() ([]byte, []int)
func (*SparkSqlJob) GetJarFileUris ¶
func (m *SparkSqlJob) GetJarFileUris() []string
func (*SparkSqlJob) GetLoggingConfig ¶
func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig
func (*SparkSqlJob) GetProperties ¶
func (m *SparkSqlJob) GetProperties() map[string]string
func (*SparkSqlJob) GetQueries ¶
func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries
func (*SparkSqlJob) GetQueryFileUri ¶
func (m *SparkSqlJob) GetQueryFileUri() string
func (*SparkSqlJob) GetQueryList ¶
func (m *SparkSqlJob) GetQueryList() *QueryList
func (*SparkSqlJob) GetScriptVariables ¶
func (m *SparkSqlJob) GetScriptVariables() map[string]string
func (*SparkSqlJob) ProtoMessage ¶
func (*SparkSqlJob) ProtoMessage()
func (*SparkSqlJob) Reset ¶
func (m *SparkSqlJob) Reset()
func (*SparkSqlJob) String ¶
func (m *SparkSqlJob) String() string
func (*SparkSqlJob) XXX_OneofFuncs ¶
func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type SparkSqlJob_QueryFileUri ¶
type SparkSqlJob_QueryFileUri struct {
    QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type SparkSqlJob_QueryList ¶
type SparkSqlJob_QueryList struct {
    QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
type SubmitJobRequest ¶
A request to submit a job.
type SubmitJobRequest struct {
    // [Required] The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // [Required] The job resource.
    Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"`
}
func (*SubmitJobRequest) Descriptor ¶
func (*SubmitJobRequest) Descriptor() ([]byte, []int)
func (*SubmitJobRequest) GetJob ¶
func (m *SubmitJobRequest) GetJob() *Job
func (*SubmitJobRequest) GetProjectId ¶
func (m *SubmitJobRequest) GetProjectId() string
func (*SubmitJobRequest) GetRegion ¶
func (m *SubmitJobRequest) GetRegion() string
func (*SubmitJobRequest) ProtoMessage ¶
func (*SubmitJobRequest) ProtoMessage()
func (*SubmitJobRequest) Reset ¶
func (m *SubmitJobRequest) Reset()
func (*SubmitJobRequest) String ¶
func (m *SubmitJobRequest) String() string
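The Job message (defined earlier in this package) wraps exactly one typed payload, such as a SparkJob, in its type_job oneof. A hedged fragment, assuming the generated JobControllerClient plus the Job, JobPlacement, and Job_SparkJob types from this package (identifiers and names hypothetical):

req := &SubmitJobRequest{
    ProjectId: "my-project",
    Region:    "global",
    Job: &Job{
        Placement: &JobPlacement{ClusterName: "my-cluster"},
        TypeJob:   &Job_SparkJob{SparkJob: job}, // job built as in the SparkJob sketch above
    },
}
submitted, err := NewJobControllerClient(conn).SubmitJob(ctx, req)
_ = submitted // on success, inspect the returned Job's status; always check err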
type UpdateClusterRequest ¶
A request to update a cluster.
type UpdateClusterRequest struct {
    // [Required] The ID of the Google Cloud Platform project the
    // cluster belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // [Required] The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"`
    // [Required] The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // [Required] The changes to the cluster.
    Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
    // [Required] Specifies the path, relative to `Cluster`, of the field to
    // update. For example, to change the number of workers in a cluster to 5,
    // the `update_mask` parameter would be specified as
    // `config.worker_config.num_instances`, and the `PATCH` request body would
    // specify the new value, as follows:
    //
    //   {
    //     "config":{
    //       "workerConfig":{
    //         "numInstances":"5"
    //       }
    //     }
    //   }
    //
    // Similarly, to change the number of preemptible workers in a cluster to 5,
    // the `update_mask` parameter would be
    // `config.secondary_worker_config.num_instances`, and the `PATCH` request
    // body would be set as follows:
    //
    //   {
    //     "config":{
    //       "secondaryWorkerConfig":{
    //         "numInstances":"5"
    //       }
    //     }
    //   }
    //
    // Note: Currently, `config.worker_config.num_instances` and
    // `config.secondary_worker_config.num_instances` are the only fields
    // that can be updated.
    UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
}
func (*UpdateClusterRequest) Descriptor ¶
func (*UpdateClusterRequest) Descriptor() ([]byte, []int)
func (*UpdateClusterRequest) GetCluster ¶
func (m *UpdateClusterRequest) GetCluster() *Cluster
func (*UpdateClusterRequest) GetClusterName ¶
func (m *UpdateClusterRequest) GetClusterName() string
func (*UpdateClusterRequest) GetProjectId ¶
func (m *UpdateClusterRequest) GetProjectId() string
func (*UpdateClusterRequest) GetRegion ¶
func (m *UpdateClusterRequest) GetRegion() string
func (*UpdateClusterRequest) GetUpdateMask ¶
func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf5.FieldMask
func (*UpdateClusterRequest) ProtoMessage ¶
func (*UpdateClusterRequest) ProtoMessage()
func (*UpdateClusterRequest) Reset ¶
func (m *UpdateClusterRequest) Reset()
func (*UpdateClusterRequest) String ¶
func (m *UpdateClusterRequest) String() string
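As the note above says, only the two num_instances paths are currently updatable. A sketch resizing the primary worker group to five instances, assuming the ClusterConfig and InstanceGroupConfig types defined earlier in this package (google_protobuf5 is this package's alias for the FieldMask well-known type; names hypothetical):

req := &UpdateClusterRequest{
    ProjectId:   "my-project",
    Region:      "global",
    ClusterName: "my-cluster",
    Cluster: &Cluster{
        Config: &ClusterConfig{
            WorkerConfig: &InstanceGroupConfig{NumInstances: 5},
        },
    },
    UpdateMask: &google_protobuf5.FieldMask{
        Paths: []string{"config.worker_config.num_instances"},
    },
}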