databricks.getClusters
Explore with Pulumi AI
Note: If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add a depends_on attribute in order to prevent "default auth: cannot configure default credentials" errors.
Retrieves a list of databricks.Cluster ids, that were created by Pulumi or manually, with or without databricks_cluster_policy.
Example Usage
Retrieve cluster IDs for all clusters:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// Look up the IDs of every cluster in the workspace (no filter arguments).
const all = databricks.getClusters({});
import pulumi
import pulumi_databricks as databricks
# Look up the IDs of every cluster in the workspace (no filter arguments).
all = databricks.get_clusters()
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up the IDs of every cluster in the workspace (no filter arguments).
		_, err := databricks.GetClusters(ctx, &databricks.GetClustersArgs{}, nil)
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Look up the IDs of every cluster in the workspace (no filter arguments).
    var all = Databricks.GetClusters.Invoke();
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetClustersArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Look up the IDs of every cluster in the workspace (no filter arguments).
        final var all = DatabricksFunctions.getClusters();
    }
}
variables:
  # IDs of every cluster in the workspace (no filter arguments).
  all:
    fn::invoke:
      function: databricks:getClusters
      arguments: {}
Retrieve cluster IDs for all clusters having “Shared” in the cluster name:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// Only return clusters whose name contains the substring "shared".
const allShared = databricks.getClusters({
    clusterNameContains: "shared",
});
import pulumi
import pulumi_databricks as databricks
# Only return clusters whose name contains the substring "shared".
all_shared = databricks.get_clusters(cluster_name_contains="shared")
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Only return clusters whose name contains the substring "shared".
		_, err := databricks.GetClusters(ctx, &databricks.GetClustersArgs{
			ClusterNameContains: pulumi.StringRef("shared"),
		}, nil)
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Only return clusters whose name contains the substring "shared".
    var allShared = Databricks.GetClusters.Invoke(new()
    {
        ClusterNameContains = "shared",
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetClustersArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Only return clusters whose name contains the substring "shared".
        final var allShared = DatabricksFunctions.getClusters(GetClustersArgs.builder()
            .clusterNameContains("shared")
            .build());
    }
}
variables:
  # Only return clusters whose name contains the substring "shared".
  allShared:
    fn::invoke:
      function: databricks:getClusters
      arguments:
        clusterNameContains: shared
Filtering clusters
Listing clusters can be slow for workspaces containing many clusters. Use filters to limit the number of clusters returned for better performance. You can filter clusters by state, source, policy, or pinned status:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// Clusters currently in the RUNNING state.
const allRunningClusters = databricks.getClusters({
    filterBy: {
        clusterStates: ["RUNNING"],
    },
});
// Clusters created with the given cluster policy.
const allClustersWithPolicy = databricks.getClusters({
    filterBy: {
        policyId: "1234-5678-9012",
    },
});
// Clusters created through the API.
const allApiClusters = databricks.getClusters({
    filterBy: {
        clusterSources: ["API"],
    },
});
// Clusters that are pinned.
const allPinnedClusters = databricks.getClusters({
    filterBy: {
        isPinned: true,
    },
});
import pulumi
import pulumi_databricks as databricks
# Clusters currently in the RUNNING state.
all_running_clusters = databricks.get_clusters(filter_by={
    "cluster_states": ["RUNNING"],
})
# Clusters created with the given cluster policy.
all_clusters_with_policy = databricks.get_clusters(filter_by={
    "policy_id": "1234-5678-9012",
})
# Clusters created through the API.
all_api_clusters = databricks.get_clusters(filter_by={
    "cluster_sources": ["API"],
})
# Clusters that are pinned.
all_pinned_clusters = databricks.get_clusters(filter_by={
    "is_pinned": True,
})
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Clusters currently in the RUNNING state.
		_, err := databricks.GetClusters(ctx, &databricks.GetClustersArgs{
			FilterBy: databricks.GetClustersFilterBy{
				ClusterStates: []string{
					"RUNNING",
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		// Clusters created with the given cluster policy.
		_, err = databricks.GetClusters(ctx, &databricks.GetClustersArgs{
			FilterBy: databricks.GetClustersFilterBy{
				PolicyId: pulumi.StringRef("1234-5678-9012"),
			},
		}, nil)
		if err != nil {
			return err
		}
		// Clusters created through the API.
		_, err = databricks.GetClusters(ctx, &databricks.GetClustersArgs{
			FilterBy: databricks.GetClustersFilterBy{
				ClusterSources: []string{
					"API",
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		// Clusters that are pinned.
		_, err = databricks.GetClusters(ctx, &databricks.GetClustersArgs{
			FilterBy: databricks.GetClustersFilterBy{
				IsPinned: pulumi.BoolRef(true),
			},
		}, nil)
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Clusters currently in the RUNNING state.
    var allRunningClusters = Databricks.GetClusters.Invoke(new()
    {
        FilterBy = new Databricks.Inputs.GetClustersFilterByInputArgs
        {
            ClusterStates = new[]
            {
                "RUNNING",
            },
        },
    });
    // Clusters created with the given cluster policy.
    var allClustersWithPolicy = Databricks.GetClusters.Invoke(new()
    {
        FilterBy = new Databricks.Inputs.GetClustersFilterByInputArgs
        {
            PolicyId = "1234-5678-9012",
        },
    });
    // Clusters created through the API.
    var allApiClusters = Databricks.GetClusters.Invoke(new()
    {
        FilterBy = new Databricks.Inputs.GetClustersFilterByInputArgs
        {
            ClusterSources = new[]
            {
                "API",
            },
        },
    });
    // Clusters that are pinned.
    var allPinnedClusters = Databricks.GetClusters.Invoke(new()
    {
        FilterBy = new Databricks.Inputs.GetClustersFilterByInputArgs
        {
            IsPinned = true,
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetClustersArgs;
import com.pulumi.databricks.inputs.GetClustersFilterByArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Clusters currently in the RUNNING state.
        final var allRunningClusters = DatabricksFunctions.getClusters(GetClustersArgs.builder()
            .filterBy(GetClustersFilterByArgs.builder()
                .clusterStates("RUNNING")
                .build())
            .build());
        // Clusters created with the given cluster policy.
        final var allClustersWithPolicy = DatabricksFunctions.getClusters(GetClustersArgs.builder()
            .filterBy(GetClustersFilterByArgs.builder()
                .policyId("1234-5678-9012")
                .build())
            .build());
        // Clusters created through the API.
        final var allApiClusters = DatabricksFunctions.getClusters(GetClustersArgs.builder()
            .filterBy(GetClustersFilterByArgs.builder()
                .clusterSources("API")
                .build())
            .build());
        // Clusters that are pinned.
        final var allPinnedClusters = DatabricksFunctions.getClusters(GetClustersArgs.builder()
            .filterBy(GetClustersFilterByArgs.builder()
                .isPinned(true)
                .build())
            .build());
    }
}
variables:
  # Clusters currently in the RUNNING state.
  allRunningClusters:
    fn::invoke:
      function: databricks:getClusters
      arguments:
        filterBy:
          clusterStates:
            - RUNNING
  # Clusters created with the given cluster policy.
  allClustersWithPolicy:
    fn::invoke:
      function: databricks:getClusters
      arguments:
        filterBy:
          policyId: 1234-5678-9012
  # Clusters created through the API.
  allApiClusters:
    fn::invoke:
      function: databricks:getClusters
      arguments:
        filterBy:
          clusterSources:
            - API
  # Clusters that are pinned.
  allPinnedClusters:
    fn::invoke:
      function: databricks:getClusters
      arguments:
        filterBy:
          isPinned: true
Related Resources
The following resources are used in the same context:
- End to end workspace management guide.
- databricks.Cluster to create Databricks Clusters.
- databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
- databricks.InstancePool to manage instance pools to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
- databricks.Job to manage Databricks Jobs to run non-interactive code in a databricks_cluster.
- databricks.Library to install a library on databricks_cluster.
- databricks.Pipeline to deploy Delta Live Tables.
Using getClusters
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getClusters(args: GetClustersArgs, opts?: InvokeOptions): Promise<GetClustersResult>
function getClustersOutput(args: GetClustersOutputArgs, opts?: InvokeOptions): Output<GetClustersResult>
def get_clusters(cluster_name_contains: Optional[str] = None,
filter_by: Optional[GetClustersFilterBy] = None,
id: Optional[str] = None,
ids: Optional[Sequence[str]] = None,
opts: Optional[InvokeOptions] = None) -> GetClustersResult
def get_clusters_output(cluster_name_contains: Optional[pulumi.Input[str]] = None,
filter_by: Optional[pulumi.Input[GetClustersFilterByArgs]] = None,
id: Optional[pulumi.Input[str]] = None,
ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetClustersResult]
func GetClusters(ctx *Context, args *GetClustersArgs, opts ...InvokeOption) (*GetClustersResult, error)
func GetClustersOutput(ctx *Context, args *GetClustersOutputArgs, opts ...InvokeOption) GetClustersResultOutput
> Note: This function is named GetClusters
in the Go SDK.
public static class GetClusters
{
public static Task<GetClustersResult> InvokeAsync(GetClustersArgs args, InvokeOptions? opts = null)
public static Output<GetClustersResult> Invoke(GetClustersInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetClustersResult> getClusters(GetClustersArgs args, InvokeOptions options)
public static Output<GetClustersResult> getClusters(GetClustersArgs args, InvokeOptions options)
fn::invoke:
function: databricks:index/getClusters:getClusters
arguments:
# arguments dictionary
The following arguments are supported:
- ClusterNameContains string - Only return databricks.Cluster ids that match the given name string.
- FilterBy GetClustersFilterBy - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- Id string
- Ids List<string> - list of databricks.Cluster ids
- ClusterNameContains string - Only return databricks.Cluster ids that match the given name string.
- FilterBy GetClustersFilterBy - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- Id string
- Ids []string - list of databricks.Cluster ids
- clusterNameContains String - Only return databricks.Cluster ids that match the given name string.
- filterBy GetClustersFilterBy - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- id String
- ids List<String> - list of databricks.Cluster ids
- clusterNameContains string - Only return databricks.Cluster ids that match the given name string.
- filterBy GetClustersFilterBy - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- id string
- ids string[] - list of databricks.Cluster ids
- cluster_name_contains str - Only return databricks.Cluster ids that match the given name string.
- filter_by GetClustersFilterBy - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- id str
- ids Sequence[str] - list of databricks.Cluster ids
- clusterNameContains String - Only return databricks.Cluster ids that match the given name string.
- filterBy Property Map - Filters to apply to the listed clusters. See filter_by Configuration Block below for details.
- id String
- ids List<String> - list of databricks.Cluster ids
getClusters Result
The following output properties are available:
- Id string
- Ids List<string> - list of databricks.Cluster ids
- ClusterNameContains string
- FilterBy GetClustersFilterBy
- Id string
- Ids []string - list of databricks.Cluster ids
- ClusterNameContains string
- FilterBy GetClustersFilterBy
- id String
- ids List<String> - list of databricks.Cluster ids
- clusterNameContains String
- filterBy GetClustersFilterBy
- id string
- ids string[] - list of databricks.Cluster ids
- clusterNameContains string
- filterBy GetClustersFilterBy
- id str
- ids Sequence[str] - list of databricks.Cluster ids
- cluster_name_contains str
- filter_by GetClustersFilterBy
- id String
- ids List<String> - list of databricks.Cluster ids
- clusterNameContains String
- filterBy Property Map
Supporting Types
GetClustersFilterBy
- ClusterSources List<string> - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- ClusterStates List<string> - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- IsPinned bool - Whether to filter by pinned clusters.
- PolicyId string - Filter by databricks.ClusterPolicy id.
- ClusterSources []string - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- ClusterStates []string - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- IsPinned bool - Whether to filter by pinned clusters.
- PolicyId string - Filter by databricks.ClusterPolicy id.
- clusterSources List<String> - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- clusterStates List<String> - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- isPinned Boolean - Whether to filter by pinned clusters.
- policyId String - Filter by databricks.ClusterPolicy id.
- clusterSources string[] - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- clusterStates string[] - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- isPinned boolean - Whether to filter by pinned clusters.
- policyId string - Filter by databricks.ClusterPolicy id.
- cluster_sources Sequence[str] - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- cluster_states Sequence[str] - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- is_pinned bool - Whether to filter by pinned clusters.
- policy_id str - Filter by databricks.ClusterPolicy id.
- clusterSources List<String> - List of cluster sources to filter by. Possible values are API, JOB, MODELS, PIPELINE, PIPELINE_MAINTENANCE, SQL, and UI.
- clusterStates List<String> - List of cluster states to filter by. Possible values are RUNNING, PENDING, RESIZING, RESTARTING, TERMINATING, TERMINATED, ERROR, and UNKNOWN.
- isPinned Boolean - Whether to filter by pinned clusters.
- policyId String - Filter by databricks.ClusterPolicy id.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.