castai.WorkloadScalingPolicy
Explore with Pulumi AI
Create WorkloadScalingPolicy Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new WorkloadScalingPolicy(name: string, args: WorkloadScalingPolicyArgs, opts?: CustomResourceOptions);
@overload
def WorkloadScalingPolicy(resource_name: str,
args: WorkloadScalingPolicyArgs,
opts: Optional[ResourceOptions] = None)
@overload
def WorkloadScalingPolicy(resource_name: str,
opts: Optional[ResourceOptions] = None,
management_option: Optional[str] = None,
apply_type: Optional[str] = None,
cluster_id: Optional[str] = None,
memory: Optional[WorkloadScalingPolicyMemoryArgs] = None,
cpu: Optional[WorkloadScalingPolicyCpuArgs] = None,
confidence: Optional[WorkloadScalingPolicyConfidenceArgs] = None,
downscaling: Optional[WorkloadScalingPolicyDownscalingArgs] = None,
anti_affinity: Optional[WorkloadScalingPolicyAntiAffinityArgs] = None,
memory_event: Optional[WorkloadScalingPolicyMemoryEventArgs] = None,
name: Optional[str] = None,
startup: Optional[WorkloadScalingPolicyStartupArgs] = None,
timeouts: Optional[WorkloadScalingPolicyTimeoutsArgs] = None,
workload_scaling_policy_id: Optional[str] = None)
func NewWorkloadScalingPolicy(ctx *Context, name string, args WorkloadScalingPolicyArgs, opts ...ResourceOption) (*WorkloadScalingPolicy, error)
public WorkloadScalingPolicy(string name, WorkloadScalingPolicyArgs args, CustomResourceOptions? opts = null)
public WorkloadScalingPolicy(String name, WorkloadScalingPolicyArgs args)
public WorkloadScalingPolicy(String name, WorkloadScalingPolicyArgs args, CustomResourceOptions options)
type: castai:WorkloadScalingPolicy
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args WorkloadScalingPolicyArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args WorkloadScalingPolicyArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args WorkloadScalingPolicyArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args WorkloadScalingPolicyArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args WorkloadScalingPolicyArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var workloadScalingPolicyResource = new Castai.WorkloadScalingPolicy("workloadScalingPolicyResource", new()
{
ManagementOption = "string",
ApplyType = "string",
ClusterId = "string",
Memory = new Castai.Inputs.WorkloadScalingPolicyMemoryArgs
{
ApplyThresholdStrategy = new Castai.Inputs.WorkloadScalingPolicyMemoryApplyThresholdStrategyArgs
{
Type = "string",
Denominator = "string",
Exponent = 0,
Numerator = 0,
Percentage = 0,
},
Args = new[]
{
"string",
},
Function = "string",
Limit = new Castai.Inputs.WorkloadScalingPolicyMemoryLimitArgs
{
Type = "string",
Multiplier = 0,
},
LookBackPeriodSeconds = 0,
ManagementOption = "string",
Max = 0,
Min = 0,
Overhead = 0,
},
Cpu = new Castai.Inputs.WorkloadScalingPolicyCpuArgs
{
ApplyThresholdStrategy = new Castai.Inputs.WorkloadScalingPolicyCpuApplyThresholdStrategyArgs
{
Type = "string",
Denominator = "string",
Exponent = 0,
Numerator = 0,
Percentage = 0,
},
Args = new[]
{
"string",
},
Function = "string",
Limit = new Castai.Inputs.WorkloadScalingPolicyCpuLimitArgs
{
Type = "string",
Multiplier = 0,
},
LookBackPeriodSeconds = 0,
ManagementOption = "string",
Max = 0,
Min = 0,
Overhead = 0,
},
Confidence = new Castai.Inputs.WorkloadScalingPolicyConfidenceArgs
{
Threshold = 0,
},
Downscaling = new Castai.Inputs.WorkloadScalingPolicyDownscalingArgs
{
ApplyType = "string",
},
AntiAffinity = new Castai.Inputs.WorkloadScalingPolicyAntiAffinityArgs
{
ConsiderAntiAffinity = false,
},
MemoryEvent = new Castai.Inputs.WorkloadScalingPolicyMemoryEventArgs
{
ApplyType = "string",
},
Name = "string",
Startup = new Castai.Inputs.WorkloadScalingPolicyStartupArgs
{
PeriodSeconds = 0,
},
Timeouts = new Castai.Inputs.WorkloadScalingPolicyTimeoutsArgs
{
Create = "string",
Delete = "string",
Read = "string",
Update = "string",
},
WorkloadScalingPolicyId = "string",
});
example, err := castai.NewWorkloadScalingPolicy(ctx, "workloadScalingPolicyResource", &castai.WorkloadScalingPolicyArgs{
ManagementOption: pulumi.String("string"),
ApplyType: pulumi.String("string"),
ClusterId: pulumi.String("string"),
Memory: &castai.WorkloadScalingPolicyMemoryArgs{
ApplyThresholdStrategy: &castai.WorkloadScalingPolicyMemoryApplyThresholdStrategyArgs{
Type: pulumi.String("string"),
Denominator: pulumi.String("string"),
Exponent: pulumi.Float64(0),
Numerator: pulumi.Float64(0),
Percentage: pulumi.Float64(0),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
Function: pulumi.String("string"),
Limit: &castai.WorkloadScalingPolicyMemoryLimitArgs{
Type: pulumi.String("string"),
Multiplier: pulumi.Float64(0),
},
LookBackPeriodSeconds: pulumi.Float64(0),
ManagementOption: pulumi.String("string"),
Max: pulumi.Float64(0),
Min: pulumi.Float64(0),
Overhead: pulumi.Float64(0),
},
Cpu: &castai.WorkloadScalingPolicyCpuArgs{
ApplyThresholdStrategy: &castai.WorkloadScalingPolicyCpuApplyThresholdStrategyArgs{
Type: pulumi.String("string"),
Denominator: pulumi.String("string"),
Exponent: pulumi.Float64(0),
Numerator: pulumi.Float64(0),
Percentage: pulumi.Float64(0),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
Function: pulumi.String("string"),
Limit: &castai.WorkloadScalingPolicyCpuLimitArgs{
Type: pulumi.String("string"),
Multiplier: pulumi.Float64(0),
},
LookBackPeriodSeconds: pulumi.Float64(0),
ManagementOption: pulumi.String("string"),
Max: pulumi.Float64(0),
Min: pulumi.Float64(0),
Overhead: pulumi.Float64(0),
},
Confidence: &castai.WorkloadScalingPolicyConfidenceArgs{
Threshold: pulumi.Float64(0),
},
Downscaling: &castai.WorkloadScalingPolicyDownscalingArgs{
ApplyType: pulumi.String("string"),
},
AntiAffinity: &castai.WorkloadScalingPolicyAntiAffinityArgs{
ConsiderAntiAffinity: pulumi.Bool(false),
},
MemoryEvent: &castai.WorkloadScalingPolicyMemoryEventArgs{
ApplyType: pulumi.String("string"),
},
Name: pulumi.String("string"),
Startup: &castai.WorkloadScalingPolicyStartupArgs{
PeriodSeconds: pulumi.Float64(0),
},
Timeouts: &castai.WorkloadScalingPolicyTimeoutsArgs{
Create: pulumi.String("string"),
Delete: pulumi.String("string"),
Read: pulumi.String("string"),
Update: pulumi.String("string"),
},
WorkloadScalingPolicyId: pulumi.String("string"),
})
var workloadScalingPolicyResource = new WorkloadScalingPolicy("workloadScalingPolicyResource", WorkloadScalingPolicyArgs.builder()
.managementOption("string")
.applyType("string")
.clusterId("string")
.memory(WorkloadScalingPolicyMemoryArgs.builder()
.applyThresholdStrategy(WorkloadScalingPolicyMemoryApplyThresholdStrategyArgs.builder()
.type("string")
.denominator("string")
.exponent(0)
.numerator(0)
.percentage(0)
.build())
.args("string")
.function("string")
.limit(WorkloadScalingPolicyMemoryLimitArgs.builder()
.type("string")
.multiplier(0)
.build())
.lookBackPeriodSeconds(0)
.managementOption("string")
.max(0)
.min(0)
.overhead(0)
.build())
.cpu(WorkloadScalingPolicyCpuArgs.builder()
.applyThresholdStrategy(WorkloadScalingPolicyCpuApplyThresholdStrategyArgs.builder()
.type("string")
.denominator("string")
.exponent(0)
.numerator(0)
.percentage(0)
.build())
.args("string")
.function("string")
.limit(WorkloadScalingPolicyCpuLimitArgs.builder()
.type("string")
.multiplier(0)
.build())
.lookBackPeriodSeconds(0)
.managementOption("string")
.max(0)
.min(0)
.overhead(0)
.build())
.confidence(WorkloadScalingPolicyConfidenceArgs.builder()
.threshold(0)
.build())
.downscaling(WorkloadScalingPolicyDownscalingArgs.builder()
.applyType("string")
.build())
.antiAffinity(WorkloadScalingPolicyAntiAffinityArgs.builder()
.considerAntiAffinity(false)
.build())
.memoryEvent(WorkloadScalingPolicyMemoryEventArgs.builder()
.applyType("string")
.build())
.name("string")
.startup(WorkloadScalingPolicyStartupArgs.builder()
.periodSeconds(0)
.build())
.timeouts(WorkloadScalingPolicyTimeoutsArgs.builder()
.create("string")
.delete("string")
.read("string")
.update("string")
.build())
.workloadScalingPolicyId("string")
.build());
workload_scaling_policy_resource = castai.WorkloadScalingPolicy("workloadScalingPolicyResource",
management_option="string",
apply_type="string",
cluster_id="string",
memory={
"apply_threshold_strategy": {
"type": "string",
"denominator": "string",
"exponent": 0,
"numerator": 0,
"percentage": 0,
},
"args": ["string"],
"function": "string",
"limit": {
"type": "string",
"multiplier": 0,
},
"look_back_period_seconds": 0,
"management_option": "string",
"max": 0,
"min": 0,
"overhead": 0,
},
cpu={
"apply_threshold_strategy": {
"type": "string",
"denominator": "string",
"exponent": 0,
"numerator": 0,
"percentage": 0,
},
"args": ["string"],
"function": "string",
"limit": {
"type": "string",
"multiplier": 0,
},
"look_back_period_seconds": 0,
"management_option": "string",
"max": 0,
"min": 0,
"overhead": 0,
},
confidence={
"threshold": 0,
},
downscaling={
"apply_type": "string",
},
anti_affinity={
"consider_anti_affinity": False,
},
memory_event={
"apply_type": "string",
},
name="string",
startup={
"period_seconds": 0,
},
timeouts={
"create": "string",
"delete": "string",
"read": "string",
"update": "string",
},
workload_scaling_policy_id="string")
const workloadScalingPolicyResource = new castai.WorkloadScalingPolicy("workloadScalingPolicyResource", {
managementOption: "string",
applyType: "string",
clusterId: "string",
memory: {
applyThresholdStrategy: {
type: "string",
denominator: "string",
exponent: 0,
numerator: 0,
percentage: 0,
},
args: ["string"],
"function": "string",
limit: {
type: "string",
multiplier: 0,
},
lookBackPeriodSeconds: 0,
managementOption: "string",
max: 0,
min: 0,
overhead: 0,
},
cpu: {
applyThresholdStrategy: {
type: "string",
denominator: "string",
exponent: 0,
numerator: 0,
percentage: 0,
},
args: ["string"],
"function": "string",
limit: {
type: "string",
multiplier: 0,
},
lookBackPeriodSeconds: 0,
managementOption: "string",
max: 0,
min: 0,
overhead: 0,
},
confidence: {
threshold: 0,
},
downscaling: {
applyType: "string",
},
antiAffinity: {
considerAntiAffinity: false,
},
memoryEvent: {
applyType: "string",
},
name: "string",
startup: {
periodSeconds: 0,
},
timeouts: {
create: "string",
"delete": "string",
read: "string",
update: "string",
},
workloadScalingPolicyId: "string",
});
type: castai:WorkloadScalingPolicy
properties:
antiAffinity:
considerAntiAffinity: false
applyType: string
clusterId: string
confidence:
threshold: 0
cpu:
applyThresholdStrategy:
denominator: string
exponent: 0
numerator: 0
percentage: 0
type: string
args:
- string
function: string
limit:
multiplier: 0
type: string
lookBackPeriodSeconds: 0
managementOption: string
max: 0
min: 0
overhead: 0
downscaling:
applyType: string
managementOption: string
memory:
applyThresholdStrategy:
denominator: string
exponent: 0
numerator: 0
percentage: 0
type: string
args:
- string
function: string
limit:
multiplier: 0
type: string
lookBackPeriodSeconds: 0
managementOption: string
max: 0
min: 0
overhead: 0
memoryEvent:
applyType: string
name: string
startup:
periodSeconds: 0
timeouts:
create: string
delete: string
read: string
update: string
workloadScalingPolicyId: string
WorkloadScalingPolicy Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The WorkloadScalingPolicy resource accepts the following input properties:
- Apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Cluster
Id string - CAST AI cluster id
- Cpu
Workload
Scaling Policy Cpu - Management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- Memory
Workload
Scaling Policy Memory - Anti
Affinity Workload Scaling Policy Anti Affinity - Confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- Downscaling
Workload
Scaling Policy Downscaling - Memory
Event Workload Scaling Policy Memory Event - Name string
- Scaling policy name
- Startup
Workload
Scaling Policy Startup - Timeouts
Workload
Scaling Policy Timeouts - Workload
Scaling Policy Id string
- Apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Cluster
Id string - CAST AI cluster id
- Cpu
Workload
Scaling Policy Cpu Args - Management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- Memory
Workload
Scaling Policy Memory Args - Anti
Affinity Workload Scaling Policy Anti Affinity Args - Confidence
Workload
Scaling Policy Confidence Args - Defines the confidence settings for applying recommendations.
- Downscaling
Workload
Scaling Policy Downscaling Args - Memory
Event Workload Scaling Policy Memory Event Args - Name string
- Scaling policy name
- Startup
Workload
Scaling Policy Startup Args - Timeouts
Workload
Scaling Policy Timeouts Args - Workload
Scaling Policy Id string
- apply
Type String - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id String - CAST AI cluster id
- cpu
Workload
Scaling Policy Cpu - management
Option String - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory - anti
Affinity Workload Scaling Policy Anti Affinity - confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- downscaling
Workload
Scaling Policy Downscaling - memory
Event Workload Scaling Policy Memory Event - name String
- Scaling policy name
- startup
Workload
Scaling Policy Startup - timeouts
Workload
Scaling Policy Timeouts - workload
Scaling Policy Id String
- apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id string - CAST AI cluster id
- cpu
Workload
Scaling Policy Cpu - management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory - anti
Affinity Workload Scaling Policy Anti Affinity - confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- downscaling
Workload
Scaling Policy Downscaling - memory
Event Workload Scaling Policy Memory Event - name string
- Scaling policy name
- startup
Workload
Scaling Policy Startup - timeouts
Workload
Scaling Policy Timeouts - workload
Scaling Policy Id string
- apply_
type str - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster_
id str - CAST AI cluster id
- cpu
Workload
Scaling Policy Cpu Args - management_
option str - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory Args - anti_
affinity Workload Scaling Policy Anti Affinity Args - confidence
Workload
Scaling Policy Confidence Args - Defines the confidence settings for applying recommendations.
- downscaling
Workload
Scaling Policy Downscaling Args - memory_
event Workload Scaling Policy Memory Event Args - name str
- Scaling policy name
- startup
Workload
Scaling Policy Startup Args - timeouts
Workload
Scaling Policy Timeouts Args - workload_
scaling_ policy_ id str
- apply
Type String - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id String - CAST AI cluster id
- cpu Property Map
- management
Option String - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory Property Map
- anti
Affinity Property Map - confidence Property Map
- Defines the confidence settings for applying recommendations.
- downscaling Property Map
- memory
Event Property Map - name String
- Scaling policy name
- startup Property Map
- timeouts Property Map
- workload
Scaling Policy Id String
Outputs
All input properties are implicitly available as output properties. Additionally, the WorkloadScalingPolicy resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing WorkloadScalingPolicy Resource
Get an existing WorkloadScalingPolicy resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: WorkloadScalingPolicyState, opts?: CustomResourceOptions): WorkloadScalingPolicy
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
anti_affinity: Optional[WorkloadScalingPolicyAntiAffinityArgs] = None,
apply_type: Optional[str] = None,
cluster_id: Optional[str] = None,
confidence: Optional[WorkloadScalingPolicyConfidenceArgs] = None,
cpu: Optional[WorkloadScalingPolicyCpuArgs] = None,
downscaling: Optional[WorkloadScalingPolicyDownscalingArgs] = None,
management_option: Optional[str] = None,
memory: Optional[WorkloadScalingPolicyMemoryArgs] = None,
memory_event: Optional[WorkloadScalingPolicyMemoryEventArgs] = None,
name: Optional[str] = None,
startup: Optional[WorkloadScalingPolicyStartupArgs] = None,
timeouts: Optional[WorkloadScalingPolicyTimeoutsArgs] = None,
workload_scaling_policy_id: Optional[str] = None) -> WorkloadScalingPolicy
func GetWorkloadScalingPolicy(ctx *Context, name string, id IDInput, state *WorkloadScalingPolicyState, opts ...ResourceOption) (*WorkloadScalingPolicy, error)
public static WorkloadScalingPolicy Get(string name, Input<string> id, WorkloadScalingPolicyState? state, CustomResourceOptions? opts = null)
public static WorkloadScalingPolicy get(String name, Output<String> id, WorkloadScalingPolicyState state, CustomResourceOptions options)
resources:
  _:
    type: castai:WorkloadScalingPolicy
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Anti
Affinity Workload Scaling Policy Anti Affinity - Apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Cluster
Id string - CAST AI cluster id
- Confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- Cpu
Workload
Scaling Policy Cpu - Downscaling
Workload
Scaling Policy Downscaling - Management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- Memory
Workload
Scaling Policy Memory - Memory
Event Workload Scaling Policy Memory Event - Name string
- Scaling policy name
- Startup
Workload
Scaling Policy Startup - Timeouts
Workload
Scaling Policy Timeouts - Workload
Scaling Policy Id string
- Anti
Affinity Workload Scaling Policy Anti Affinity Args - Apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Cluster
Id string - CAST AI cluster id
- Confidence
Workload
Scaling Policy Confidence Args - Defines the confidence settings for applying recommendations.
- Cpu
Workload
Scaling Policy Cpu Args - Downscaling
Workload
Scaling Policy Downscaling Args - Management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- Memory
Workload
Scaling Policy Memory Args - Memory
Event Workload Scaling Policy Memory Event Args - Name string
- Scaling policy name
- Startup
Workload
Scaling Policy Startup Args - Timeouts
Workload
Scaling Policy Timeouts Args - Workload
Scaling Policy Id string
- anti
Affinity Workload Scaling Policy Anti Affinity - apply
Type String - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id String - CAST AI cluster id
- confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- cpu
Workload
Scaling Policy Cpu - downscaling
Workload
Scaling Policy Downscaling - management
Option String - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory - memory
Event Workload Scaling Policy Memory Event - name String
- Scaling policy name
- startup
Workload
Scaling Policy Startup - timeouts
Workload
Scaling Policy Timeouts - workload
Scaling Policy Id String
- anti
Affinity Workload Scaling Policy Anti Affinity - apply
Type string - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id string - CAST AI cluster id
- confidence
Workload
Scaling Policy Confidence - Defines the confidence settings for applying recommendations.
- cpu
Workload
Scaling Policy Cpu - downscaling
Workload
Scaling Policy Downscaling - management
Option string - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory - memory
Event Workload Scaling Policy Memory Event - name string
- Scaling policy name
- startup
Workload
Scaling Policy Startup - timeouts
Workload
Scaling Policy Timeouts - workload
Scaling Policy Id string
- anti_
affinity Workload Scaling Policy Anti Affinity Args - apply_
type str - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster_
id str - CAST AI cluster id
- confidence
Workload
Scaling Policy Confidence Args - Defines the confidence settings for applying recommendations.
- cpu
Workload
Scaling Policy Cpu Args - downscaling
Workload
Scaling Policy Downscaling Args - management_
option str - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory
Workload
Scaling Policy Memory Args - memory_
event WorkloadScaling Policy Memory Event Args - name str
- Scaling policy name
- startup
Workload
Scaling Policy Startup Args - timeouts
Workload
Scaling Policy Timeouts Args - workload_
scaling_ strpolicy_ id
- anti
Affinity Property Map - apply
Type String - Recommendation apply type. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED
- pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- cluster
Id String - CAST AI cluster id
- confidence Property Map
- Defines the confidence settings for applying recommendations.
- cpu Property Map
- downscaling Property Map
- management
Option String - Defines possible options for workload management. - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload.
- memory Property Map
- memory
Event Property Map - name String
- Scaling policy name
- startup Property Map
- timeouts Property Map
- workload
Scaling StringPolicy Id
Supporting Types
WorkloadScalingPolicyAntiAffinity, WorkloadScalingPolicyAntiAffinityArgs
- Consider
Anti boolAffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
- Consider
Anti boolAffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
- consider
Anti BooleanAffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
- consider
Anti booleanAffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
- consider_
anti_ boolaffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
- consider
Anti BooleanAffinity - Defines if anti-affinity should be considered when scaling the workload. If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred.
WorkloadScalingPolicyConfidence, WorkloadScalingPolicyConfidenceArgs
- Threshold double
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
- Threshold float64
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
- threshold Double
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
- threshold number
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
- threshold float
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
- threshold Number
- Defines the confidence threshold for applying recommendations. The smaller number indicates that we require fewer metrics data points to apply recommendations - changing this value can cause applying less precise recommendations. Do not change the default unless you want to optimize with fewer data points (e.g., short-lived workloads).
WorkloadScalingPolicyCpu, WorkloadScalingPolicyCpuArgs
- Apply
Threshold double - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- Apply
Threshold WorkloadStrategy Scaling Policy Cpu Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - Args List<string>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - Function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- Limit
Workload
Scaling Policy Cpu Limit - Resource limit settings
- Look
Back doublePeriod Seconds - The look back period in seconds for the recommendation.
- Management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - Max double
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Min double
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Overhead double
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- Apply
Threshold float64 - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- Apply
Threshold WorkloadStrategy Scaling Policy Cpu Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - Args []string
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - Function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- Limit
Workload
Scaling Policy Cpu Limit - Resource limit settings
- Look
Back float64Period Seconds - The look back period in seconds for the recommendation.
- Management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - Max float64
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Min float64
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Overhead float64
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold Double - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold WorkloadStrategy Scaling Policy Cpu Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args List<String>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function String
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Cpu Limit - Resource limit settings
- look
Back DoublePeriod Seconds - The look back period in seconds for the recommendation.
- management
Option String - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max Double
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min Double
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead Double
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold number - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold WorkloadStrategy Scaling Policy Cpu Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args string[]
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Cpu Limit - Resource limit settings
- look
Back numberPeriod Seconds - The look back period in seconds for the recommendation.
- management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max number
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min number
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead number
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply_
threshold float - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply_
threshold_ Workloadstrategy Scaling Policy Cpu Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args Sequence[str]
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function str
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Cpu Limit - Resource limit settings
- look_
back_ floatperiod_ seconds - The look back period in seconds for the recommendation.
- management_
option str - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max float
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min float
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead float
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold Number - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold Property MapStrategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args List<String>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function String
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit Property Map
- Resource limit settings
- look
Back NumberPeriod Seconds - The look back period in seconds for the recommendation.
- management
Option String - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max Number
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min Number
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead Number
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
WorkloadScalingPolicyCpuApplyThresholdStrategy, WorkloadScalingPolicyCpuApplyThresholdStrategyArgs
- Type string
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- Denominator string
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Exponent double
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Numerator double
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Percentage double
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- Type string
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- Denominator string
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Exponent float64
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Numerator float64
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Percentage float64
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type String
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- denominator String
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent Double
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator Double
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage Double
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type string
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- denominator string
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent number
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator number
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage number
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type str
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- denominator str
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent float
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator float
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage float
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type String
- Defines the apply threshold strategy type. - PERCENTAGE - recommendation will be applied when the diff of current requests and the new recommendation is greater than the set value - DEFAULT_ADAPTIVE - will pick a larger threshold percentage for small workloads and a smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but allows tweaking the parameters of the adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend using the DEFAULT_ADAPTIVE strategy.
- denominator String
- If the denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when the numerator and exponent are 1 and the denominator is 0, the threshold for a 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent Number
- The exponent changes how fast the curve goes down. A smaller value means we won't pick an extremely small number for big resources, for example: - if the numerator is 0, the denominator is 1, and the exponent is 1, for 50 CPU we will pick a 2% threshold - if the numerator is 0, the denominator is 1, and the exponent is 0.8, for 50 CPU we will pick a 4.3% threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator Number
- The numerator affects the vertical stretch of the function used in the adaptive threshold - a smaller number will create a smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage Number
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
WorkloadScalingPolicyCpuLimit, WorkloadScalingPolicyCpuLimitArgs
- Type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- Multiplier double
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- Type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- Multiplier float64
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type String
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier Double
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier number
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type str
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier float
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type String
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier Number
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
WorkloadScalingPolicyDownscaling, WorkloadScalingPolicyDownscalingArgs
- Apply
Type string - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Apply
Type string - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type String - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type string - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply_
type str - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type String - Defines the apply type to be used when downscaling. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
WorkloadScalingPolicyMemory, WorkloadScalingPolicyMemoryArgs
- Apply
Threshold double - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- Apply
Threshold Strategy Workload Scaling Policy Memory Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - Args List<string>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - Function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- Limit
Workload
Scaling Policy Memory Limit - Resource limit settings
- Look
Back Period Seconds double - The look back period in seconds for the recommendation.
- Management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - Max double
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Min double
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Overhead double
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- Apply
Threshold float64 - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- Apply
Threshold Strategy Workload Scaling Policy Memory Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - Args []string
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - Function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- Limit
Workload
Scaling Policy Memory Limit - Resource limit settings
- Look
Back Period Seconds float64 - The look back period in seconds for the recommendation.
- Management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - Max float64
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Min float64
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- Overhead float64
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold Double - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold Strategy Workload Scaling Policy Memory Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args List<String>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function String
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Memory Limit - Resource limit settings
- look
Back Period Seconds Double - The look back period in seconds for the recommendation.
- management
Option String - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max Double
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min Double
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead Double
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold number - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold Strategy Workload Scaling Policy Memory Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args string[]
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function string
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Memory Limit - Resource limit settings
- look
Back Period Seconds number - The look back period in seconds for the recommendation.
- management
Option string - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max number
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min number
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead number
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply_
threshold float - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply_
threshold_ strategy Workload Scaling Policy Memory Apply Threshold Strategy - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args Sequence[str]
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function str
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit
Workload
Scaling Policy Memory Limit - Resource limit settings
- look_
back_ period_ seconds float - The look back period in seconds for the recommendation.
- management_
option str - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max float
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min float
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead float
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
- apply
Threshold Number - The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value
- apply
Threshold Strategy Property Map - Resource apply threshold strategy settings. The default strategy is
PERCENTAGE
with percentage value set to 0.1. - args List<String>
- The arguments for the function - i.e. for
QUANTILE
this should be a [0, 1] float.MAX
doesn't accept any args - function String
- The function used to calculate the resource recommendation. Supported values:
QUANTILE
,MAX
- limit Property Map
- Resource limit settings
- look
Back Period Seconds Number - The look back period in seconds for the recommendation.
- management
Option String - Disables management for a single resource when set to
READ_ONLY
. The resource will use its original workload template requests and limits. Supported value:READ_ONLY
. Minimum required workload-autoscaler version:v0.23.1
. - max Number
- Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- min Number
- Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores.
- overhead Number
- Overhead for the recommendation, e.g.
0.1
will result in 10% higher recommendation
WorkloadScalingPolicyMemoryApplyThresholdStrategy, WorkloadScalingPolicyMemoryApplyThresholdStrategyArgs
- Type string
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- Denominator string
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Exponent double
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- Numerator double
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Percentage double
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- Type string
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- Denominator string
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Exponent float64
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- Numerator float64
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- Percentage float64
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type String
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- denominator String
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent Double
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator Double
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage Double
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type string
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- denominator string
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent number
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator number
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage number
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type str
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- denominator str
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent float
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator float
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage float
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
- type String
- Defines apply threshold strategy type. - PERCENTAGE - recommendation will be applied when diff of current requests and new recommendation is greater than set value - DEFAULT_ADAPTIVE - will pick larger threshold percentage for small workloads and smaller percentage for large workloads. - CUSTOM_ADAPTIVE - works in the same way as DEFAULT_ADAPTIVE, but it allows to tweak parameters of adaptive threshold formula: percentage = numerator/(currentRequest + denominator)^exponent. This strategy is for advanced use cases; we recommend to use the DEFAULT_ADAPTIVE strategy.
- denominator String
- If denominator is close or equal to 0, the threshold will be much bigger for small values. For example, when numerator and exponent are 1 and denominator is 0, the threshold for 0.5 req. CPU will be 200%. It must be defined for the CUSTOM_ADAPTIVE strategy.
- exponent Number
- The exponent changes how fast the curve is going down. The smaller value will cause that we won’t pick extremely small number for big resources, for example: - if numerator is 0, denominator is 1, and exponent is 1, for 50 CPU we will pick 2% threshold - if numerator is 0, denominator is 1, and exponent is 0.8, for 50 CPU we will pick 4.3% threshold It must be defined for the CUSTOM_ADAPTIVE strategy.
- numerator Number
- The numerator affects vertical stretch of function used in adaptive threshold - smaller number will create smaller threshold. It must be defined for the CUSTOM_ADAPTIVE strategy.
- percentage Number
- Percentage of how much difference there should be between the current pod requests and the new recommendation. It must be defined for the PERCENTAGE strategy.
WorkloadScalingPolicyMemoryEvent, WorkloadScalingPolicyMemoryEventArgs
- Apply
Type string - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- Apply
Type string - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type String - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type string - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply_
type str - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
- apply
Type String - Defines the apply type to be used when applying recommendation for memory related event. - IMMEDIATE - pods are restarted immediately when new recommendation is generated. - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.)
WorkloadScalingPolicyMemoryLimit, WorkloadScalingPolicyMemoryLimitArgs
- Type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- Multiplier double
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- Type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- Multiplier float64
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type String
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier Double
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type string
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier number
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type str
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier float
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
- type String
- Defines limit strategy type. - NO_LIMIT - removes the resource limit even if it was specified in the workload spec. - MULTIPLIER - used to calculate the resource limit. The final value is determined by multiplying the resource request by the specified factor.
- multiplier Number
- Multiplier used to calculate the resource limit. It must be defined for the MULTIPLIER strategy.
WorkloadScalingPolicyStartup, WorkloadScalingPolicyStartupArgs
- Period
Seconds double - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
- Period
Seconds float64 - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
- period
Seconds Double - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
- period
Seconds number - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
- period_
seconds float - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
- period
Seconds Number - Defines the duration (in seconds) during which elevated resource usage is expected at startup. When set, recommendations will be adjusted to disregard resource spikes within this period. If not specified, the workload will receive standard recommendations without startup considerations.
WorkloadScalingPolicyTimeouts, WorkloadScalingPolicyTimeoutsArgs
Package Details
- Repository
- castai castai/terraform-provider-castai
- License
- Notes
- This Pulumi package is based on the
castai
Terraform Provider.