
konnect.GatewayPluginAiProxyAdvanced

konnect 2.4.1 published on Thursday, Mar 13, 2025 by kong

    GatewayPluginAiProxyAdvanced Resource

    Example Usage

    TypeScript, Python, Go, and C# examples: coming soon. Java and YAML examples follow; a TypeScript sketch also appears after the YAML example below.
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.konnect.GatewayPluginAiProxyAdvanced;
    import com.pulumi.konnect.GatewayPluginAiProxyAdvancedArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigBalancerArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetAuthArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetModelArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigVectordbArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConsumerArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedConsumerGroupArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedOrderingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedOrderingAfterArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedOrderingBeforeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedRouteArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyAdvancedServiceArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var myGatewaypluginaiproxyadvanced = new GatewayPluginAiProxyAdvanced("myGatewaypluginaiproxyadvanced", GatewayPluginAiProxyAdvancedArgs.builder()
                .config(GatewayPluginAiProxyAdvancedConfigArgs.builder()
                    .balancer(GatewayPluginAiProxyAdvancedConfigBalancerArgs.builder()
                        .algorithm("lowest-latency")
                        .connectTimeout(1069677678)
                        .hashOnHeader("...my_hash_on_header...")
                        .latencyStrategy("e2e")
                        .readTimeout(1128540479)
                        .retries(14809)
                        .slots(27573)
                        .tokensCountStrategy("prompt-tokens")
                        .writeTimeout(1475900303)
                        .build())
                    .embeddings(GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs.builder()
                        .auth(GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs.builder()
                            .allowOverride(true)
                            .awsAccessKeyId("...my_aws_access_key_id...")
                            .awsSecretAccessKey("...my_aws_secret_access_key...")
                            .azureClientId("...my_azure_client_id...")
                            .azureClientSecret("...my_azure_client_secret...")
                            .azureTenantId("...my_azure_tenant_id...")
                            .azureUseManagedIdentity(false)
                            .gcpServiceAccountJson("...my_gcp_service_account_json...")
                            .gcpUseServiceAccount(false)
                            .headerName("...my_header_name...")
                            .headerValue("...my_header_value...")
                            .paramLocation("body")
                            .paramName("...my_param_name...")
                            .paramValue("...my_param_value...")
                            .build())
                        .model(GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs.builder()
                            .name("...my_name...")
                            .options(GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs.builder()
                                .upstreamUrl("...my_upstream_url...")
                                .build())
                            .provider("mistral")
                            .build())
                        .build())
                    .maxRequestBodySize(5)
                    .modelNameHeader(true)
                    .responseStreaming("allow")
                    .targets(GatewayPluginAiProxyAdvancedConfigTargetArgs.builder()
                        .auth(GatewayPluginAiProxyAdvancedConfigTargetAuthArgs.builder()
                            .allowOverride(true)
                            .awsAccessKeyId("...my_aws_access_key_id...")
                            .awsSecretAccessKey("...my_aws_secret_access_key...")
                            .azureClientId("...my_azure_client_id...")
                            .azureClientSecret("...my_azure_client_secret...")
                            .azureTenantId("...my_azure_tenant_id...")
                            .azureUseManagedIdentity(true)
                            .gcpServiceAccountJson("...my_gcp_service_account_json...")
                            .gcpUseServiceAccount(false)
                            .headerName("...my_header_name...")
                            .headerValue("...my_header_value...")
                            .paramLocation("query")
                            .paramName("...my_param_name...")
                            .paramValue("...my_param_value...")
                            .build())
                        .description("...my_description...")
                        .logging(GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs.builder()
                            .logPayloads(true)
                            .logStatistics(true)
                            .build())
                        .model(GatewayPluginAiProxyAdvancedConfigTargetModelArgs.builder()
                            .name("...my_name...")
                            .options(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs.builder()
                                .anthropicVersion("...my_anthropic_version...")
                                .azureApiVersion("...my_azure_api_version...")
                                .azureDeploymentId("...my_azure_deployment_id...")
                                .azureInstance("...my_azure_instance...")
                                .bedrock(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs.builder()
                                    .awsRegion("...my_aws_region...")
                                    .build())
                                .gemini(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs.builder()
                                    .apiEndpoint("...my_api_endpoint...")
                                    .locationId("...my_location_id...")
                                    .projectId("...my_project_id...")
                                    .build())
                                .huggingface(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs.builder()
                                    .useCache(true)
                                    .waitForModel(false)
                                    .build())
                                .inputCost(2.57)
                                .llama2Format("openai")
                                .maxTokens(2)
                                .mistralFormat("openai")
                                .outputCost(7.34)
                                .temperature(3.51)
                                .topK(204)
                                .topP(0.37)
                                .upstreamPath("...my_upstream_path...")
                                .upstreamUrl("...my_upstream_url...")
                                .build())
                            .provider("bedrock")
                            .build())
                        .routeType("llm/v1/completions")
                        .weight(58189)
                        .build())
                    .vectordb(GatewayPluginAiProxyAdvancedConfigVectordbArgs.builder()
                        .dimensions(3)
                        .distanceMetric("euclidean")
                        .redis(GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs.builder()
                            .clusterMaxRedirections(4)
                            .clusterNodes(GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs.builder()
                                .ip("...my_ip...")
                                .port(50944)
                                .build())
                            .connectTimeout(656443886)
                            .connectionIsProxied(false)
                            .database(10)
                            .host("...my_host...")
                            .keepaliveBacklog(251172057)
                            .keepalivePoolSize(1127137192)
                            .password("...my_password...")
                            .port(31201)
                            .readTimeout(1222450418)
                            .sendTimeout(1541453227)
                            .sentinelMaster("...my_sentinel_master...")
                            .sentinelNodes(GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs.builder()
                                .host("...my_host...")
                                .port(61553)
                                .build())
                            .sentinelPassword("...my_sentinel_password...")
                            .sentinelRole("master")
                            .sentinelUsername("...my_sentinel_username...")
                            .serverName("...my_server_name...")
                            .ssl(true)
                            .sslVerify(false)
                            .username("...my_username...")
                            .build())
                        .strategy("redis")
                        .threshold(6.56)
                        .build())
                    .build())
                .consumer(GatewayPluginAiProxyAdvancedConsumerArgs.builder()
                    .id("...my_id...")
                    .build())
                .consumerGroup(GatewayPluginAiProxyAdvancedConsumerGroupArgs.builder()
                    .id("...my_id...")
                    .build())
                .controlPlaneId("9524ec7d-36d9-465d-a8c5-83a3c9390458")
                .enabled(true)
                .gatewayPluginAiProxyAdvancedId("...my_id...")
                .instanceName("...my_instance_name...")
                .ordering(GatewayPluginAiProxyAdvancedOrderingArgs.builder()
                    .after(GatewayPluginAiProxyAdvancedOrderingAfterArgs.builder()
                        .accesses("...")
                        .build())
                    .before(GatewayPluginAiProxyAdvancedOrderingBeforeArgs.builder()
                        .accesses("...")
                        .build())
                    .build())
                .protocols("https")
                .route(GatewayPluginAiProxyAdvancedRouteArgs.builder()
                    .id("...my_id...")
                    .build())
                .service(GatewayPluginAiProxyAdvancedServiceArgs.builder()
                    .id("...my_id...")
                    .build())
                .tags("...")
                .build());
    
        }
    }
    
    resources:
      myGatewaypluginaiproxyadvanced:
        type: konnect:GatewayPluginAiProxyAdvanced
        properties:
          config:
            balancer:
              algorithm: lowest-latency
              connectTimeout: 1069677678
              hashOnHeader: '...my_hash_on_header...'
              latencyStrategy: e2e
              readTimeout: 1128540479
              retries: 14809
              slots: 27573
              tokensCountStrategy: prompt-tokens
              writeTimeout: 1475900303
            embeddings:
              auth:
                allowOverride: true
                awsAccessKeyId: '...my_aws_access_key_id...'
                awsSecretAccessKey: '...my_aws_secret_access_key...'
                azureClientId: '...my_azure_client_id...'
                azureClientSecret: '...my_azure_client_secret...'
                azureTenantId: '...my_azure_tenant_id...'
                azureUseManagedIdentity: false
                gcpServiceAccountJson: '...my_gcp_service_account_json...'
                gcpUseServiceAccount: false
                headerName: '...my_header_name...'
                headerValue: '...my_header_value...'
                paramLocation: body
                paramName: '...my_param_name...'
                paramValue: '...my_param_value...'
              model:
                name: '...my_name...'
                options:
                  upstreamUrl: '...my_upstream_url...'
                provider: mistral
            maxRequestBodySize: 5
            modelNameHeader: true
            responseStreaming: allow
            targets:
              - auth:
                  allowOverride: true
                  awsAccessKeyId: '...my_aws_access_key_id...'
                  awsSecretAccessKey: '...my_aws_secret_access_key...'
                  azureClientId: '...my_azure_client_id...'
                  azureClientSecret: '...my_azure_client_secret...'
                  azureTenantId: '...my_azure_tenant_id...'
                  azureUseManagedIdentity: true
                  gcpServiceAccountJson: '...my_gcp_service_account_json...'
                  gcpUseServiceAccount: false
                  headerName: '...my_header_name...'
                  headerValue: '...my_header_value...'
                  paramLocation: query
                  paramName: '...my_param_name...'
                  paramValue: '...my_param_value...'
                description: '...my_description...'
                logging:
                  logPayloads: true
                  logStatistics: true
                model:
                  name: '...my_name...'
                  options:
                    anthropicVersion: '...my_anthropic_version...'
                    azureApiVersion: '...my_azure_api_version...'
                    azureDeploymentId: '...my_azure_deployment_id...'
                    azureInstance: '...my_azure_instance...'
                    bedrock:
                      awsRegion: '...my_aws_region...'
                    gemini:
                      apiEndpoint: '...my_api_endpoint...'
                      locationId: '...my_location_id...'
                      projectId: '...my_project_id...'
                    huggingface:
                      useCache: true
                      waitForModel: false
                    inputCost: 2.57
                    llama2Format: openai
                    maxTokens: 2
                    mistralFormat: openai
                    outputCost: 7.34
                    temperature: 3.51
                    topK: 204
                    topP: 0.37
                    upstreamPath: '...my_upstream_path...'
                    upstreamUrl: '...my_upstream_url...'
                  provider: bedrock
                routeType: llm/v1/completions
                weight: 58189
            vectordb:
              dimensions: 3
              distanceMetric: euclidean
              redis:
                clusterMaxRedirections: 4
                clusterNodes:
                  - ip: '...my_ip...'
                    port: 50944
                connectTimeout: 656443886
                connectionIsProxied: false
                database: 10
                host: '...my_host...'
                keepaliveBacklog: 251172057
                keepalivePoolSize: 1127137192
                password: '...my_password...'
                port: 31201
                readTimeout: 1222450418
                sendTimeout: 1541453227
                sentinelMaster: '...my_sentinel_master...'
                sentinelNodes:
                  - host: '...my_host...'
                    port: 61553
                sentinelPassword: '...my_sentinel_password...'
                sentinelRole: master
                sentinelUsername: '...my_sentinel_username...'
                serverName: '...my_server_name...'
                ssl: true
                sslVerify: false
                username: '...my_username...'
              strategy: redis
              threshold: 6.56
          consumer:
            id: '...my_id...'
          consumerGroup:
            id: '...my_id...'
          controlPlaneId: 9524ec7d-36d9-465d-a8c5-83a3c9390458
          enabled: true
          gatewayPluginAiProxyAdvancedId: '...my_id...'
          instanceName: '...my_instance_name...'
          ordering:
            after:
              accesses:
                - '...'
            before:
              accesses:
                - '...'
          protocols:
            - https
          route:
            id: '...my_id...'
          service:
            id: '...my_id...'
          tags:
            - '...'
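
    Pending the official TypeScript example, the following is a minimal TypeScript sketch of a comparable configuration: one Bedrock target serving llm/v1/completions behind a lowest-latency balancer. All values are placeholders, property names follow the constructor reference below, and the import path is an assumption to adjust to the konnect SDK package installed in your project.

    import * as konnect from "@pulumi/konnect"; // assumed package name; adjust to your installed konnect SDK

    // Minimal sketch: a single Bedrock target for llm/v1/completions with a
    // lowest-latency balancer, attached to a route. Placeholder values throughout.
    const myGatewaypluginaiproxyadvanced = new konnect.GatewayPluginAiProxyAdvanced("myGatewaypluginaiproxyadvanced", {
        controlPlaneId: "9524ec7d-36d9-465d-a8c5-83a3c9390458",
        enabled: true,
        protocols: ["https"],
        config: {
            balancer: {
                algorithm: "lowest-latency",
                latencyStrategy: "e2e",
            },
            responseStreaming: "allow",
            targets: [{
                auth: {
                    allowOverride: true,
                    headerName: "...my_header_name...",
                    headerValue: "...my_header_value...",
                },
                model: {
                    name: "...my_name...",
                    provider: "bedrock",
                    options: {
                        bedrock: {
                            awsRegion: "...my_aws_region...",
                        },
                        maxTokens: 2,
                    },
                },
                routeType: "llm/v1/completions",
                weight: 58189,
            }],
        },
        route: {
            id: "...my_id...",
        },
    });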
    

    Create GatewayPluginAiProxyAdvanced Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new GatewayPluginAiProxyAdvanced(name: string, args: GatewayPluginAiProxyAdvancedArgs, opts?: CustomResourceOptions);
    @overload
    def GatewayPluginAiProxyAdvanced(resource_name: str,
                                     args: GatewayPluginAiProxyAdvancedArgs,
                                     opts: Optional[ResourceOptions] = None)
    
    @overload
    def GatewayPluginAiProxyAdvanced(resource_name: str,
                                     opts: Optional[ResourceOptions] = None,
                                     config: Optional[GatewayPluginAiProxyAdvancedConfigArgs] = None,
                                     control_plane_id: Optional[str] = None,
                                     consumer: Optional[GatewayPluginAiProxyAdvancedConsumerArgs] = None,
                                     consumer_group: Optional[GatewayPluginAiProxyAdvancedConsumerGroupArgs] = None,
                                     enabled: Optional[bool] = None,
                                     gateway_plugin_ai_proxy_advanced_id: Optional[str] = None,
                                     instance_name: Optional[str] = None,
                                     ordering: Optional[GatewayPluginAiProxyAdvancedOrderingArgs] = None,
                                     protocols: Optional[Sequence[str]] = None,
                                     route: Optional[GatewayPluginAiProxyAdvancedRouteArgs] = None,
                                     service: Optional[GatewayPluginAiProxyAdvancedServiceArgs] = None,
                                     tags: Optional[Sequence[str]] = None)
    func NewGatewayPluginAiProxyAdvanced(ctx *Context, name string, args GatewayPluginAiProxyAdvancedArgs, opts ...ResourceOption) (*GatewayPluginAiProxyAdvanced, error)
    public GatewayPluginAiProxyAdvanced(string name, GatewayPluginAiProxyAdvancedArgs args, CustomResourceOptions? opts = null)
    public GatewayPluginAiProxyAdvanced(String name, GatewayPluginAiProxyAdvancedArgs args)
    public GatewayPluginAiProxyAdvanced(String name, GatewayPluginAiProxyAdvancedArgs args, CustomResourceOptions options)
    
    type: konnect:GatewayPluginAiProxyAdvanced
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args GatewayPluginAiProxyAdvancedArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args GatewayPluginAiProxyAdvancedArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args GatewayPluginAiProxyAdvancedArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args GatewayPluginAiProxyAdvancedArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args GatewayPluginAiProxyAdvancedArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
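
    As an illustration of the opts / options parameter described above, the sketch below attaches standard resource options such as protect. It is a minimal TypeScript sketch with placeholder values; the import path is an assumption to adjust to your installed konnect SDK package.

    import * as konnect from "@pulumi/konnect"; // assumed package name; adjust to your installed konnect SDK

    // The third constructor argument is the "bag of options" that controls the
    // resource's behavior, independent of its input properties.
    const plugin = new konnect.GatewayPluginAiProxyAdvanced("myPlugin", {
        controlPlaneId: "9524ec7d-36d9-465d-a8c5-83a3c9390458",
        config: {},
    }, {
        protect: true,              // refuse to delete this resource until protection is removed
        // dependsOn: [someRoute],  // explicit dependency ordering, if needed
    });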

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var gatewayPluginAiProxyAdvancedResource = new Konnect.GatewayPluginAiProxyAdvanced("gatewayPluginAiProxyAdvancedResource", new()
    {
        Config = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigArgs
        {
            Balancer = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigBalancerArgs
            {
                Algorithm = "string",
                ConnectTimeout = 0,
                HashOnHeader = "string",
                LatencyStrategy = "string",
                ReadTimeout = 0,
                Retries = 0,
                Slots = 0,
                TokensCountStrategy = "string",
                WriteTimeout = 0,
            },
            Embeddings = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs
            {
                Auth = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs
                {
                    AllowOverride = false,
                    AwsAccessKeyId = "string",
                    AwsSecretAccessKey = "string",
                    AzureClientId = "string",
                    AzureClientSecret = "string",
                    AzureTenantId = "string",
                    AzureUseManagedIdentity = false,
                    GcpServiceAccountJson = "string",
                    GcpUseServiceAccount = false,
                    HeaderName = "string",
                    HeaderValue = "string",
                    ParamLocation = "string",
                    ParamName = "string",
                    ParamValue = "string",
                },
                Model = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs
                {
                    Name = "string",
                    Options = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs
                    {
                        UpstreamUrl = "string",
                    },
                    Provider = "string",
                },
            },
            MaxRequestBodySize = 0,
            ModelNameHeader = false,
            ResponseStreaming = "string",
            Targets = new[]
            {
                new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetArgs
                {
                    Auth = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetAuthArgs
                    {
                        AllowOverride = false,
                        AwsAccessKeyId = "string",
                        AwsSecretAccessKey = "string",
                        AzureClientId = "string",
                        AzureClientSecret = "string",
                        AzureTenantId = "string",
                        AzureUseManagedIdentity = false,
                        GcpServiceAccountJson = "string",
                        GcpUseServiceAccount = false,
                        HeaderName = "string",
                        HeaderValue = "string",
                        ParamLocation = "string",
                        ParamName = "string",
                        ParamValue = "string",
                    },
                    Description = "string",
                    Logging = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs
                    {
                        LogPayloads = false,
                        LogStatistics = false,
                    },
                    Model = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetModelArgs
                    {
                        Name = "string",
                        Options = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs
                        {
                            AnthropicVersion = "string",
                            AzureApiVersion = "string",
                            AzureDeploymentId = "string",
                            AzureInstance = "string",
                            Bedrock = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs
                            {
                                AwsRegion = "string",
                            },
                            Gemini = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs
                            {
                                ApiEndpoint = "string",
                                LocationId = "string",
                                ProjectId = "string",
                            },
                            Huggingface = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs
                            {
                                UseCache = false,
                                WaitForModel = false,
                            },
                            InputCost = 0,
                            Llama2Format = "string",
                            MaxTokens = 0,
                            MistralFormat = "string",
                            OutputCost = 0,
                            Temperature = 0,
                            TopK = 0,
                            TopP = 0,
                            UpstreamPath = "string",
                            UpstreamUrl = "string",
                        },
                        Provider = "string",
                    },
                    RouteType = "string",
                    Weight = 0,
                },
            },
            Vectordb = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigVectordbArgs
            {
                Dimensions = 0,
                DistanceMetric = "string",
                Redis = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs
                {
                    ClusterMaxRedirections = 0,
                    ClusterNodes = new[]
                    {
                        new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs
                        {
                            Ip = "string",
                            Port = 0,
                        },
                    },
                    ConnectTimeout = 0,
                    ConnectionIsProxied = false,
                    Database = 0,
                    Host = "string",
                    KeepaliveBacklog = 0,
                    KeepalivePoolSize = 0,
                    Password = "string",
                    Port = 0,
                    ReadTimeout = 0,
                    SendTimeout = 0,
                    SentinelMaster = "string",
                    SentinelNodes = new[]
                    {
                        new Konnect.Inputs.GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs
                        {
                            Host = "string",
                            Port = 0,
                        },
                    },
                    SentinelPassword = "string",
                    SentinelRole = "string",
                    SentinelUsername = "string",
                    ServerName = "string",
                    Ssl = false,
                    SslVerify = false,
                    Username = "string",
                },
                Strategy = "string",
                Threshold = 0,
            },
        },
        ControlPlaneId = "string",
        Consumer = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConsumerArgs
        {
            Id = "string",
        },
        ConsumerGroup = new Konnect.Inputs.GatewayPluginAiProxyAdvancedConsumerGroupArgs
        {
            Id = "string",
        },
        Enabled = false,
        GatewayPluginAiProxyAdvancedId = "string",
        InstanceName = "string",
        Ordering = new Konnect.Inputs.GatewayPluginAiProxyAdvancedOrderingArgs
        {
            After = new Konnect.Inputs.GatewayPluginAiProxyAdvancedOrderingAfterArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
            Before = new Konnect.Inputs.GatewayPluginAiProxyAdvancedOrderingBeforeArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
        },
        Protocols = new[]
        {
            "string",
        },
        Route = new Konnect.Inputs.GatewayPluginAiProxyAdvancedRouteArgs
        {
            Id = "string",
        },
        Service = new Konnect.Inputs.GatewayPluginAiProxyAdvancedServiceArgs
        {
            Id = "string",
        },
        Tags = new[]
        {
            "string",
        },
    });
    
    example, err := konnect.NewGatewayPluginAiProxyAdvanced(ctx, "gatewayPluginAiProxyAdvancedResource", &konnect.GatewayPluginAiProxyAdvancedArgs{
        Config: &konnect.GatewayPluginAiProxyAdvancedConfigArgs{
            Balancer: &konnect.GatewayPluginAiProxyAdvancedConfigBalancerArgs{
                Algorithm: pulumi.String("string"),
                ConnectTimeout: pulumi.Float64(0),
                HashOnHeader: pulumi.String("string"),
                LatencyStrategy: pulumi.String("string"),
                ReadTimeout: pulumi.Float64(0),
                Retries: pulumi.Float64(0),
                Slots: pulumi.Float64(0),
                TokensCountStrategy: pulumi.String("string"),
                WriteTimeout: pulumi.Float64(0),
            },
            Embeddings: &konnect.GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs{
                Auth: &konnect.GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs{
                    AllowOverride: pulumi.Bool(false),
                    AwsAccessKeyId: pulumi.String("string"),
                    AwsSecretAccessKey: pulumi.String("string"),
                    AzureClientId: pulumi.String("string"),
                    AzureClientSecret: pulumi.String("string"),
                    AzureTenantId: pulumi.String("string"),
                    AzureUseManagedIdentity: pulumi.Bool(false),
                    GcpServiceAccountJson: pulumi.String("string"),
                    GcpUseServiceAccount: pulumi.Bool(false),
                    HeaderName: pulumi.String("string"),
                    HeaderValue: pulumi.String("string"),
                    ParamLocation: pulumi.String("string"),
                    ParamName: pulumi.String("string"),
                    ParamValue: pulumi.String("string"),
                },
                Model: &konnect.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs{
                    Name: pulumi.String("string"),
                    Options: &konnect.GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs{
                        UpstreamUrl: pulumi.String("string"),
                    },
                    Provider: pulumi.String("string"),
                },
            },
            MaxRequestBodySize: pulumi.Float64(0),
            ModelNameHeader: pulumi.Bool(false),
            ResponseStreaming: pulumi.String("string"),
            Targets: konnect.GatewayPluginAiProxyAdvancedConfigTargetArray{
                &konnect.GatewayPluginAiProxyAdvancedConfigTargetArgs{
                    Auth: &konnect.GatewayPluginAiProxyAdvancedConfigTargetAuthArgs{
                        AllowOverride: pulumi.Bool(false),
                        AwsAccessKeyId: pulumi.String("string"),
                        AwsSecretAccessKey: pulumi.String("string"),
                        AzureClientId: pulumi.String("string"),
                        AzureClientSecret: pulumi.String("string"),
                        AzureTenantId: pulumi.String("string"),
                        AzureUseManagedIdentity: pulumi.Bool(false),
                        GcpServiceAccountJson: pulumi.String("string"),
                        GcpUseServiceAccount: pulumi.Bool(false),
                        HeaderName: pulumi.String("string"),
                        HeaderValue: pulumi.String("string"),
                        ParamLocation: pulumi.String("string"),
                        ParamName: pulumi.String("string"),
                        ParamValue: pulumi.String("string"),
                    },
                    Description: pulumi.String("string"),
                    Logging: &konnect.GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs{
                        LogPayloads: pulumi.Bool(false),
                        LogStatistics: pulumi.Bool(false),
                    },
                    Model: &konnect.GatewayPluginAiProxyAdvancedConfigTargetModelArgs{
                        Name: pulumi.String("string"),
                        Options: &konnect.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs{
                            AnthropicVersion: pulumi.String("string"),
                            AzureApiVersion: pulumi.String("string"),
                            AzureDeploymentId: pulumi.String("string"),
                            AzureInstance: pulumi.String("string"),
                            Bedrock: &konnect.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs{
                                AwsRegion: pulumi.String("string"),
                            },
                            Gemini: &konnect.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs{
                                ApiEndpoint: pulumi.String("string"),
                                LocationId: pulumi.String("string"),
                                ProjectId: pulumi.String("string"),
                            },
                            Huggingface: &konnect.GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs{
                                UseCache: pulumi.Bool(false),
                                WaitForModel: pulumi.Bool(false),
                            },
                            InputCost: pulumi.Float64(0),
                            Llama2Format: pulumi.String("string"),
                            MaxTokens: pulumi.Float64(0),
                            MistralFormat: pulumi.String("string"),
                            OutputCost: pulumi.Float64(0),
                            Temperature: pulumi.Float64(0),
                            TopK: pulumi.Float64(0),
                            TopP: pulumi.Float64(0),
                            UpstreamPath: pulumi.String("string"),
                            UpstreamUrl: pulumi.String("string"),
                        },
                        Provider: pulumi.String("string"),
                    },
                    RouteType: pulumi.String("string"),
                    Weight: pulumi.Float64(0),
                },
            },
            Vectordb: &konnect.GatewayPluginAiProxyAdvancedConfigVectordbArgs{
                Dimensions: pulumi.Float64(0),
                DistanceMetric: pulumi.String("string"),
                Redis: &konnect.GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs{
                    ClusterMaxRedirections: pulumi.Float64(0),
                    ClusterNodes: konnect.GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArray{
                        &konnect.GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs{
                            Ip: pulumi.String("string"),
                            Port: pulumi.Float64(0),
                        },
                    },
                    ConnectTimeout: pulumi.Float64(0),
                    ConnectionIsProxied: pulumi.Bool(false),
                    Database: pulumi.Float64(0),
                    Host: pulumi.String("string"),
                    KeepaliveBacklog: pulumi.Float64(0),
                    KeepalivePoolSize: pulumi.Float64(0),
                    Password: pulumi.String("string"),
                    Port: pulumi.Float64(0),
                    ReadTimeout: pulumi.Float64(0),
                    SendTimeout: pulumi.Float64(0),
                    SentinelMaster: pulumi.String("string"),
                    SentinelNodes: konnect.GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArray{
                        &konnect.GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs{
                            Host: pulumi.String("string"),
                            Port: pulumi.Float64(0),
                        },
                    },
                    SentinelPassword: pulumi.String("string"),
                    SentinelRole: pulumi.String("string"),
                    SentinelUsername: pulumi.String("string"),
                    ServerName: pulumi.String("string"),
                    Ssl: pulumi.Bool(false),
                    SslVerify: pulumi.Bool(false),
                    Username: pulumi.String("string"),
                },
                Strategy: pulumi.String("string"),
                Threshold: pulumi.Float64(0),
            },
        },
        ControlPlaneId: pulumi.String("string"),
        Consumer: &konnect.GatewayPluginAiProxyAdvancedConsumerArgs{
            Id: pulumi.String("string"),
        },
        ConsumerGroup: &konnect.GatewayPluginAiProxyAdvancedConsumerGroupArgs{
            Id: pulumi.String("string"),
        },
        Enabled: pulumi.Bool(false),
        GatewayPluginAiProxyAdvancedId: pulumi.String("string"),
        InstanceName: pulumi.String("string"),
        Ordering: &konnect.GatewayPluginAiProxyAdvancedOrderingArgs{
            After: &konnect.GatewayPluginAiProxyAdvancedOrderingAfterArgs{
                Accesses: pulumi.StringArray{
                    pulumi.String("string"),
                },
            },
            Before: &konnect.GatewayPluginAiProxyAdvancedOrderingBeforeArgs{
                Accesses: pulumi.StringArray{
                    pulumi.String("string"),
                },
            },
        },
        Protocols: pulumi.StringArray{
            pulumi.String("string"),
        },
        Route: &konnect.GatewayPluginAiProxyAdvancedRouteArgs{
            Id: pulumi.String("string"),
        },
        Service: &konnect.GatewayPluginAiProxyAdvancedServiceArgs{
            Id: pulumi.String("string"),
        },
        Tags: pulumi.StringArray{
            pulumi.String("string"),
        },
    })
    
    var gatewayPluginAiProxyAdvancedResource = new GatewayPluginAiProxyAdvanced("gatewayPluginAiProxyAdvancedResource", GatewayPluginAiProxyAdvancedArgs.builder()
        .config(GatewayPluginAiProxyAdvancedConfigArgs.builder()
            .balancer(GatewayPluginAiProxyAdvancedConfigBalancerArgs.builder()
                .algorithm("string")
                .connectTimeout(0)
                .hashOnHeader("string")
                .latencyStrategy("string")
                .readTimeout(0)
                .retries(0)
                .slots(0)
                .tokensCountStrategy("string")
                .writeTimeout(0)
                .build())
            .embeddings(GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs.builder()
                .auth(GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs.builder()
                    .allowOverride(false)
                    .awsAccessKeyId("string")
                    .awsSecretAccessKey("string")
                    .azureClientId("string")
                    .azureClientSecret("string")
                    .azureTenantId("string")
                    .azureUseManagedIdentity(false)
                    .gcpServiceAccountJson("string")
                    .gcpUseServiceAccount(false)
                    .headerName("string")
                    .headerValue("string")
                    .paramLocation("string")
                    .paramName("string")
                    .paramValue("string")
                    .build())
                .model(GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs.builder()
                    .name("string")
                    .options(GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs.builder()
                        .upstreamUrl("string")
                        .build())
                    .provider("string")
                    .build())
                .build())
            .maxRequestBodySize(0)
            .modelNameHeader(false)
            .responseStreaming("string")
            .targets(GatewayPluginAiProxyAdvancedConfigTargetArgs.builder()
                .auth(GatewayPluginAiProxyAdvancedConfigTargetAuthArgs.builder()
                    .allowOverride(false)
                    .awsAccessKeyId("string")
                    .awsSecretAccessKey("string")
                    .azureClientId("string")
                    .azureClientSecret("string")
                    .azureTenantId("string")
                    .azureUseManagedIdentity(false)
                    .gcpServiceAccountJson("string")
                    .gcpUseServiceAccount(false)
                    .headerName("string")
                    .headerValue("string")
                    .paramLocation("string")
                    .paramName("string")
                    .paramValue("string")
                    .build())
                .description("string")
                .logging(GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs.builder()
                    .logPayloads(false)
                    .logStatistics(false)
                    .build())
                .model(GatewayPluginAiProxyAdvancedConfigTargetModelArgs.builder()
                    .name("string")
                    .options(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs.builder()
                        .anthropicVersion("string")
                        .azureApiVersion("string")
                        .azureDeploymentId("string")
                        .azureInstance("string")
                        .bedrock(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs.builder()
                            .awsRegion("string")
                            .build())
                        .gemini(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs.builder()
                            .apiEndpoint("string")
                            .locationId("string")
                            .projectId("string")
                            .build())
                        .huggingface(GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs.builder()
                            .useCache(false)
                            .waitForModel(false)
                            .build())
                        .inputCost(0)
                        .llama2Format("string")
                        .maxTokens(0)
                        .mistralFormat("string")
                        .outputCost(0)
                        .temperature(0)
                        .topK(0)
                        .topP(0)
                        .upstreamPath("string")
                        .upstreamUrl("string")
                        .build())
                    .provider("string")
                    .build())
                .routeType("string")
                .weight(0)
                .build())
            .vectordb(GatewayPluginAiProxyAdvancedConfigVectordbArgs.builder()
                .dimensions(0)
                .distanceMetric("string")
                .redis(GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs.builder()
                    .clusterMaxRedirections(0)
                    .clusterNodes(GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs.builder()
                        .ip("string")
                        .port(0)
                        .build())
                    .connectTimeout(0)
                    .connectionIsProxied(false)
                    .database(0)
                    .host("string")
                    .keepaliveBacklog(0)
                    .keepalivePoolSize(0)
                    .password("string")
                    .port(0)
                    .readTimeout(0)
                    .sendTimeout(0)
                    .sentinelMaster("string")
                    .sentinelNodes(GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs.builder()
                        .host("string")
                        .port(0)
                        .build())
                    .sentinelPassword("string")
                    .sentinelRole("string")
                    .sentinelUsername("string")
                    .serverName("string")
                    .ssl(false)
                    .sslVerify(false)
                    .username("string")
                    .build())
                .strategy("string")
                .threshold(0)
                .build())
            .build())
        .controlPlaneId("string")
        .consumer(GatewayPluginAiProxyAdvancedConsumerArgs.builder()
            .id("string")
            .build())
        .consumerGroup(GatewayPluginAiProxyAdvancedConsumerGroupArgs.builder()
            .id("string")
            .build())
        .enabled(false)
        .gatewayPluginAiProxyAdvancedId("string")
        .instanceName("string")
        .ordering(GatewayPluginAiProxyAdvancedOrderingArgs.builder()
            .after(GatewayPluginAiProxyAdvancedOrderingAfterArgs.builder()
                .accesses("string")
                .build())
            .before(GatewayPluginAiProxyAdvancedOrderingBeforeArgs.builder()
                .accesses("string")
                .build())
            .build())
        .protocols("string")
        .route(GatewayPluginAiProxyAdvancedRouteArgs.builder()
            .id("string")
            .build())
        .service(GatewayPluginAiProxyAdvancedServiceArgs.builder()
            .id("string")
            .build())
        .tags("string")
        .build());
    
    gateway_plugin_ai_proxy_advanced_resource = konnect.GatewayPluginAiProxyAdvanced("gatewayPluginAiProxyAdvancedResource",
        config={
            "balancer": {
                "algorithm": "string",
                "connect_timeout": 0,
                "hash_on_header": "string",
                "latency_strategy": "string",
                "read_timeout": 0,
                "retries": 0,
                "slots": 0,
                "tokens_count_strategy": "string",
                "write_timeout": 0,
            },
            "embeddings": {
                "auth": {
                    "allow_override": False,
                    "aws_access_key_id": "string",
                    "aws_secret_access_key": "string",
                    "azure_client_id": "string",
                    "azure_client_secret": "string",
                    "azure_tenant_id": "string",
                    "azure_use_managed_identity": False,
                    "gcp_service_account_json": "string",
                    "gcp_use_service_account": False,
                    "header_name": "string",
                    "header_value": "string",
                    "param_location": "string",
                    "param_name": "string",
                    "param_value": "string",
                },
                "model": {
                    "name": "string",
                    "options": {
                        "upstream_url": "string",
                    },
                    "provider": "string",
                },
            },
            "max_request_body_size": 0,
            "model_name_header": False,
            "response_streaming": "string",
            "targets": [{
                "auth": {
                    "allow_override": False,
                    "aws_access_key_id": "string",
                    "aws_secret_access_key": "string",
                    "azure_client_id": "string",
                    "azure_client_secret": "string",
                    "azure_tenant_id": "string",
                    "azure_use_managed_identity": False,
                    "gcp_service_account_json": "string",
                    "gcp_use_service_account": False,
                    "header_name": "string",
                    "header_value": "string",
                    "param_location": "string",
                    "param_name": "string",
                    "param_value": "string",
                },
                "description": "string",
                "logging": {
                    "log_payloads": False,
                    "log_statistics": False,
                },
                "model": {
                    "name": "string",
                    "options": {
                        "anthropic_version": "string",
                        "azure_api_version": "string",
                        "azure_deployment_id": "string",
                        "azure_instance": "string",
                        "bedrock": {
                            "aws_region": "string",
                        },
                        "gemini": {
                            "api_endpoint": "string",
                            "location_id": "string",
                            "project_id": "string",
                        },
                        "huggingface": {
                            "use_cache": False,
                            "wait_for_model": False,
                        },
                        "input_cost": 0,
                        "llama2_format": "string",
                        "max_tokens": 0,
                        "mistral_format": "string",
                        "output_cost": 0,
                        "temperature": 0,
                        "top_k": 0,
                        "top_p": 0,
                        "upstream_path": "string",
                        "upstream_url": "string",
                    },
                    "provider": "string",
                },
                "route_type": "string",
                "weight": 0,
            }],
            "vectordb": {
                "dimensions": 0,
                "distance_metric": "string",
                "redis": {
                    "cluster_max_redirections": 0,
                    "cluster_nodes": [{
                        "ip": "string",
                        "port": 0,
                    }],
                    "connect_timeout": 0,
                    "connection_is_proxied": False,
                    "database": 0,
                    "host": "string",
                    "keepalive_backlog": 0,
                    "keepalive_pool_size": 0,
                    "password": "string",
                    "port": 0,
                    "read_timeout": 0,
                    "send_timeout": 0,
                    "sentinel_master": "string",
                    "sentinel_nodes": [{
                        "host": "string",
                        "port": 0,
                    }],
                    "sentinel_password": "string",
                    "sentinel_role": "string",
                    "sentinel_username": "string",
                    "server_name": "string",
                    "ssl": False,
                    "ssl_verify": False,
                    "username": "string",
                },
                "strategy": "string",
                "threshold": 0,
            },
        },
        control_plane_id="string",
        consumer={
            "id": "string",
        },
        consumer_group={
            "id": "string",
        },
        enabled=False,
        gateway_plugin_ai_proxy_advanced_id="string",
        instance_name="string",
        ordering={
            "after": {
                "accesses": ["string"],
            },
            "before": {
                "accesses": ["string"],
            },
        },
        protocols=["string"],
        route={
            "id": "string",
        },
        service={
            "id": "string",
        },
        tags=["string"])
    
    const gatewayPluginAiProxyAdvancedResource = new konnect.GatewayPluginAiProxyAdvanced("gatewayPluginAiProxyAdvancedResource", {
        config: {
            balancer: {
                algorithm: "string",
                connectTimeout: 0,
                hashOnHeader: "string",
                latencyStrategy: "string",
                readTimeout: 0,
                retries: 0,
                slots: 0,
                tokensCountStrategy: "string",
                writeTimeout: 0,
            },
            embeddings: {
                auth: {
                    allowOverride: false,
                    awsAccessKeyId: "string",
                    awsSecretAccessKey: "string",
                    azureClientId: "string",
                    azureClientSecret: "string",
                    azureTenantId: "string",
                    azureUseManagedIdentity: false,
                    gcpServiceAccountJson: "string",
                    gcpUseServiceAccount: false,
                    headerName: "string",
                    headerValue: "string",
                    paramLocation: "string",
                    paramName: "string",
                    paramValue: "string",
                },
                model: {
                    name: "string",
                    options: {
                        upstreamUrl: "string",
                    },
                    provider: "string",
                },
            },
            maxRequestBodySize: 0,
            modelNameHeader: false,
            responseStreaming: "string",
            targets: [{
                auth: {
                    allowOverride: false,
                    awsAccessKeyId: "string",
                    awsSecretAccessKey: "string",
                    azureClientId: "string",
                    azureClientSecret: "string",
                    azureTenantId: "string",
                    azureUseManagedIdentity: false,
                    gcpServiceAccountJson: "string",
                    gcpUseServiceAccount: false,
                    headerName: "string",
                    headerValue: "string",
                    paramLocation: "string",
                    paramName: "string",
                    paramValue: "string",
                },
                description: "string",
                logging: {
                    logPayloads: false,
                    logStatistics: false,
                },
                model: {
                    name: "string",
                    options: {
                        anthropicVersion: "string",
                        azureApiVersion: "string",
                        azureDeploymentId: "string",
                        azureInstance: "string",
                        bedrock: {
                            awsRegion: "string",
                        },
                        gemini: {
                            apiEndpoint: "string",
                            locationId: "string",
                            projectId: "string",
                        },
                        huggingface: {
                            useCache: false,
                            waitForModel: false,
                        },
                        inputCost: 0,
                        llama2Format: "string",
                        maxTokens: 0,
                        mistralFormat: "string",
                        outputCost: 0,
                        temperature: 0,
                        topK: 0,
                        topP: 0,
                        upstreamPath: "string",
                        upstreamUrl: "string",
                    },
                    provider: "string",
                },
                routeType: "string",
                weight: 0,
            }],
            vectordb: {
                dimensions: 0,
                distanceMetric: "string",
                redis: {
                    clusterMaxRedirections: 0,
                    clusterNodes: [{
                        ip: "string",
                        port: 0,
                    }],
                    connectTimeout: 0,
                    connectionIsProxied: false,
                    database: 0,
                    host: "string",
                    keepaliveBacklog: 0,
                    keepalivePoolSize: 0,
                    password: "string",
                    port: 0,
                    readTimeout: 0,
                    sendTimeout: 0,
                    sentinelMaster: "string",
                    sentinelNodes: [{
                        host: "string",
                        port: 0,
                    }],
                    sentinelPassword: "string",
                    sentinelRole: "string",
                    sentinelUsername: "string",
                    serverName: "string",
                    ssl: false,
                    sslVerify: false,
                    username: "string",
                },
                strategy: "string",
                threshold: 0,
            },
        },
        controlPlaneId: "string",
        consumer: {
            id: "string",
        },
        consumerGroup: {
            id: "string",
        },
        enabled: false,
        gatewayPluginAiProxyAdvancedId: "string",
        instanceName: "string",
        ordering: {
            after: {
                accesses: ["string"],
            },
            before: {
                accesses: ["string"],
            },
        },
        protocols: ["string"],
        route: {
            id: "string",
        },
        service: {
            id: "string",
        },
        tags: ["string"],
    });
    
    type: konnect:GatewayPluginAiProxyAdvanced
    properties:
        config:
            balancer:
                algorithm: string
                connectTimeout: 0
                hashOnHeader: string
                latencyStrategy: string
                readTimeout: 0
                retries: 0
                slots: 0
                tokensCountStrategy: string
                writeTimeout: 0
            embeddings:
                auth:
                    allowOverride: false
                    awsAccessKeyId: string
                    awsSecretAccessKey: string
                    azureClientId: string
                    azureClientSecret: string
                    azureTenantId: string
                    azureUseManagedIdentity: false
                    gcpServiceAccountJson: string
                    gcpUseServiceAccount: false
                    headerName: string
                    headerValue: string
                    paramLocation: string
                    paramName: string
                    paramValue: string
                model:
                    name: string
                    options:
                        upstreamUrl: string
                    provider: string
            maxRequestBodySize: 0
            modelNameHeader: false
            responseStreaming: string
            targets:
                - auth:
                    allowOverride: false
                    awsAccessKeyId: string
                    awsSecretAccessKey: string
                    azureClientId: string
                    azureClientSecret: string
                    azureTenantId: string
                    azureUseManagedIdentity: false
                    gcpServiceAccountJson: string
                    gcpUseServiceAccount: false
                    headerName: string
                    headerValue: string
                    paramLocation: string
                    paramName: string
                    paramValue: string
                  description: string
                  logging:
                    logPayloads: false
                    logStatistics: false
                  model:
                    name: string
                    options:
                        anthropicVersion: string
                        azureApiVersion: string
                        azureDeploymentId: string
                        azureInstance: string
                        bedrock:
                            awsRegion: string
                        gemini:
                            apiEndpoint: string
                            locationId: string
                            projectId: string
                        huggingface:
                            useCache: false
                            waitForModel: false
                        inputCost: 0
                        llama2Format: string
                        maxTokens: 0
                        mistralFormat: string
                        outputCost: 0
                        temperature: 0
                        topK: 0
                        topP: 0
                        upstreamPath: string
                        upstreamUrl: string
                    provider: string
                  routeType: string
                  weight: 0
            vectordb:
                dimensions: 0
                distanceMetric: string
                redis:
                    clusterMaxRedirections: 0
                    clusterNodes:
                        - ip: string
                          port: 0
                    connectTimeout: 0
                    connectionIsProxied: false
                    database: 0
                    host: string
                    keepaliveBacklog: 0
                    keepalivePoolSize: 0
                    password: string
                    port: 0
                    readTimeout: 0
                    sendTimeout: 0
                    sentinelMaster: string
                    sentinelNodes:
                        - host: string
                          port: 0
                    sentinelPassword: string
                    sentinelRole: string
                    sentinelUsername: string
                    serverName: string
                    ssl: false
                    sslVerify: false
                    username: string
                strategy: string
                threshold: 0
        consumer:
            id: string
        consumerGroup:
            id: string
        controlPlaneId: string
        enabled: false
        gatewayPluginAiProxyAdvancedId: string
        instanceName: string
        ordering:
            after:
                accesses:
                    - string
            before:
                accesses:
                    - string
        protocols:
            - string
        route:
            id: string
        service:
            id: string
        tags:
            - string
    

    GatewayPluginAiProxyAdvanced Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
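    For example, the config input can be provided either way. The snippet below is a minimal, illustrative sketch only: it assumes the provider's Python SDK is importable as pulumi_konnect with the argument classes exported at the package root (as is typical for Pulumi Python SDKs), and it uses a placeholder control plane ID.

    import pulumi
    import pulumi_konnect as konnect

    # Typed argument-class form (class name as shown in this page's Python signatures).
    typed = konnect.GatewayPluginAiProxyAdvanced(
        "ai-proxy-advanced-typed",
        control_plane_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
        config=konnect.GatewayPluginAiProxyAdvancedConfigArgs(
            response_streaming="allow",
        ),
    )

    # Equivalent dictionary-literal form using snake_case keys.
    literal = konnect.GatewayPluginAiProxyAdvanced(
        "ai-proxy-advanced-literal",
        control_plane_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
        config={
            "response_streaming": "allow",
        },
    )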

    The GatewayPluginAiProxyAdvanced resource accepts the following input properties:

    Config GatewayPluginAiProxyAdvancedConfig
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    Enabled bool
    Whether the plugin is applied.
    GatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    InstanceName string
    Ordering GatewayPluginAiProxyAdvancedOrdering
    Protocols List<string>
    A set of strings representing HTTP protocols.
    Route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    Config GatewayPluginAiProxyAdvancedConfigArgs
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiProxyAdvancedConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyAdvancedConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    Enabled bool
    Whether the plugin is applied.
    GatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    InstanceName string
    Ordering GatewayPluginAiProxyAdvancedOrderingArgs
    Protocols []string
    A set of strings representing HTTP protocols.
    Route GatewayPluginAiProxyAdvancedRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyAdvancedServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    config GatewayPluginAiProxyAdvancedConfig
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    enabled Boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId String
    The ID of this resource.
    instanceName String
    ordering GatewayPluginAiProxyAdvancedOrdering
    protocols List<String>
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    config GatewayPluginAiProxyAdvancedConfig
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    enabled boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    instanceName string
    ordering GatewayPluginAiProxyAdvancedOrdering
    protocols string[]
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    config GatewayPluginAiProxyAdvancedConfigArgs
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyAdvancedConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiProxyAdvancedConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    enabled bool
    Whether the plugin is applied.
    gateway_plugin_ai_proxy_advanced_id str
    The ID of this resource.
    instance_name str
    ordering GatewayPluginAiProxyAdvancedOrderingArgs
    protocols Sequence[str]
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    config Property Map
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    enabled Boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId String
    The ID of this resource.
    instanceName String
    ordering Property Map
    protocols List<String>
    A set of strings representing HTTP protocols.
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the GatewayPluginAiProxyAdvanced resource produces the following output properties:

    CreatedAt double
    Unix epoch when the resource was created.
    Id string
    The provider-assigned unique ID for this managed resource.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    CreatedAt float64
    Unix epoch when the resource was created.
    Id string
    The provider-assigned unique ID for this managed resource.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    createdAt Double
    Unix epoch when the resource was created.
    id String
    The provider-assigned unique ID for this managed resource.
    updatedAt Double
    Unix epoch when the resource was last updated.
    createdAt number
    Unix epoch when the resource was created.
    id string
    The provider-assigned unique ID for this managed resource.
    updatedAt number
    Unix epoch when the resource was last updated.
    created_at float
    Unix epoch when the resource was created.
    id str
    The provider-assigned unique ID for this managed resource.
    updated_at float
    Unix epoch when the resource was last updated.
    createdAt Number
    Unix epoch when the resource was created.
    id String
    The provider-assigned unique ID for this managed resource.
    updatedAt Number
    Unix epoch when the resource was last updated.
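    As a quick sketch of consuming these outputs (assuming the same hypothetical pulumi_konnect import and placeholder IDs as above), the provider-assigned id and created_at of a plugin instance can be exported as stack outputs:

    import pulumi
    import pulumi_konnect as konnect

    plugin = konnect.GatewayPluginAiProxyAdvanced(
        "ai-proxy-advanced",
        control_plane_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
        config={"response_streaming": "allow"},
    )

    # Every input is also available as an output; these two are provider-generated.
    pulumi.export("pluginId", plugin.id)                  # provider-assigned unique ID
    pulumi.export("pluginCreatedAt", plugin.created_at)   # Unix epoch when created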

    Look up Existing GatewayPluginAiProxyAdvanced Resource

    Get an existing GatewayPluginAiProxyAdvanced resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: GatewayPluginAiProxyAdvancedState, opts?: CustomResourceOptions): GatewayPluginAiProxyAdvanced
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            config: Optional[GatewayPluginAiProxyAdvancedConfigArgs] = None,
            consumer: Optional[GatewayPluginAiProxyAdvancedConsumerArgs] = None,
            consumer_group: Optional[GatewayPluginAiProxyAdvancedConsumerGroupArgs] = None,
            control_plane_id: Optional[str] = None,
            created_at: Optional[float] = None,
            enabled: Optional[bool] = None,
            gateway_plugin_ai_proxy_advanced_id: Optional[str] = None,
            instance_name: Optional[str] = None,
            ordering: Optional[GatewayPluginAiProxyAdvancedOrderingArgs] = None,
            protocols: Optional[Sequence[str]] = None,
            route: Optional[GatewayPluginAiProxyAdvancedRouteArgs] = None,
            service: Optional[GatewayPluginAiProxyAdvancedServiceArgs] = None,
            tags: Optional[Sequence[str]] = None,
            updated_at: Optional[float] = None) -> GatewayPluginAiProxyAdvanced
    func GetGatewayPluginAiProxyAdvanced(ctx *Context, name string, id IDInput, state *GatewayPluginAiProxyAdvancedState, opts ...ResourceOption) (*GatewayPluginAiProxyAdvanced, error)
    public static GatewayPluginAiProxyAdvanced Get(string name, Input<string> id, GatewayPluginAiProxyAdvancedState? state, CustomResourceOptions? opts = null)
    public static GatewayPluginAiProxyAdvanced get(String name, Output<String> id, GatewayPluginAiProxyAdvancedState state, CustomResourceOptions options)
    resources:
      _:
        type: konnect:GatewayPluginAiProxyAdvanced
        get:
          id: ${id}
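    As a minimal Python sketch of such a lookup (again assuming the pulumi_konnect package name and using a placeholder plugin ID), an existing plugin's state can be read and its attributes used without creating a new resource:

    import pulumi
    import pulumi_konnect as konnect

    # Read an existing GatewayPluginAiProxyAdvanced by its provider ID.
    existing = konnect.GatewayPluginAiProxyAdvanced.get(
        "existing-ai-proxy-advanced",
        id="00000000-0000-0000-0000-000000000000",  # placeholder plugin ID
    )

    pulumi.export("existingInstanceName", existing.instance_name)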
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Config GatewayPluginAiProxyAdvancedConfig
    Consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt double
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied.
    GatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    InstanceName string
    Ordering GatewayPluginAiProxyAdvancedOrdering
    Protocols List<string>
    A set of strings representing HTTP protocols.
    Route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    Config GatewayPluginAiProxyAdvancedConfigArgs
    Consumer GatewayPluginAiProxyAdvancedConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyAdvancedConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt float64
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied.
    GatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    InstanceName string
    Ordering GatewayPluginAiProxyAdvancedOrderingArgs
    Protocols []string
    A set of strings representing HTTP protocols.
    Route GatewayPluginAiProxyAdvancedRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyAdvancedServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyAdvancedConfig
    consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Double
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId String
    The ID of this resource.
    instanceName String
    ordering GatewayPluginAiProxyAdvancedOrdering
    protocols List<String>
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Double
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyAdvancedConfig
    consumer GatewayPluginAiProxyAdvancedConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyAdvancedConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt number
    Unix epoch when the resource was created.
    enabled boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId string
    The ID of this resource.
    instanceName string
    ordering GatewayPluginAiProxyAdvancedOrdering
    protocols string[]
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt number
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyAdvancedConfigArgs
    consumer GatewayPluginAiProxyAdvancedConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiProxyAdvancedConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    created_at float
    Unix epoch when the resource was created.
    enabled bool
    Whether the plugin is applied.
    gateway_plugin_ai_proxy_advanced_id str
    The ID of this resource.
    instance_name str
    ordering GatewayPluginAiProxyAdvancedOrderingArgs
    protocols Sequence[str]
    A set of strings representing HTTP protocols.
    route GatewayPluginAiProxyAdvancedRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyAdvancedServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updated_at float
    Unix epoch when the resource was last updated.
    config Property Map
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins can not be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Number
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied.
    gatewayPluginAiProxyAdvancedId String
    The ID of this resource.
    instanceName String
    ordering Property Map
    protocols List<String>
    A set of strings representing HTTP protocols.
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Number
    Unix epoch when the resource was last updated.

    Supporting Types

    GatewayPluginAiProxyAdvancedConfig, GatewayPluginAiProxyAdvancedConfigArgs

    Balancer GatewayPluginAiProxyAdvancedConfigBalancer
    Embeddings GatewayPluginAiProxyAdvancedConfigEmbeddings
    MaxRequestBodySize double
    Maximum request body size allowed to be introspected.
    ModelNameHeader bool
    Display the model name selected in the X-Kong-LLM-Model response header.
    ResponseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    Targets List<GatewayPluginAiProxyAdvancedConfigTarget>
    Vectordb GatewayPluginAiProxyAdvancedConfigVectordb
    Balancer GatewayPluginAiProxyAdvancedConfigBalancer
    Embeddings GatewayPluginAiProxyAdvancedConfigEmbeddings
    MaxRequestBodySize float64
    Maximum request body size allowed to be introspected.
    ModelNameHeader bool
    Display the model name selected in the X-Kong-LLM-Model response header.
    ResponseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    Targets []GatewayPluginAiProxyAdvancedConfigTarget
    Vectordb GatewayPluginAiProxyAdvancedConfigVectordb
    balancer GatewayPluginAiProxyAdvancedConfigBalancer
    embeddings GatewayPluginAiProxyAdvancedConfigEmbeddings
    maxRequestBodySize Double
    Maximum request body size allowed to be introspected.
    modelNameHeader Boolean
    Display the model name selected in the X-Kong-LLM-Model response header.
    responseStreaming String
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    targets List<GatewayPluginAiProxyAdvancedConfigTarget>
    vectordb GatewayPluginAiProxyAdvancedConfigVectordb
    balancer GatewayPluginAiProxyAdvancedConfigBalancer
    embeddings GatewayPluginAiProxyAdvancedConfigEmbeddings
    maxRequestBodySize number
    Maximum request body size allowed to be introspected.
    modelNameHeader boolean
    Display the model name selected in the X-Kong-LLM-Model response header.
    responseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    targets GatewayPluginAiProxyAdvancedConfigTarget[]
    vectordb GatewayPluginAiProxyAdvancedConfigVectordb
    balancer GatewayPluginAiProxyAdvancedConfigBalancer
    embeddings GatewayPluginAiProxyAdvancedConfigEmbeddings
    max_request_body_size float
    Maximum request body size allowed to be introspected.
    model_name_header bool
    Display the model name selected in the X-Kong-LLM-Model response header.
    response_streaming str
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    targets Sequence[GatewayPluginAiProxyAdvancedConfigTarget]
    vectordb GatewayPluginAiProxyAdvancedConfigVectordb
    balancer Property Map
    embeddings Property Map
    maxRequestBodySize Number
    Maximum request body size allowed to be introspected.
    modelNameHeader Boolean
    Display the model name selected in the X-Kong-LLM-Model response header.
    responseStreaming String
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. must be one of ["allow", "always", "deny"]
    targets List<Property Map>
    vectordb Property Map

    GatewayPluginAiProxyAdvancedConfigBalancer, GatewayPluginAiProxyAdvancedConfigBalancerArgs

    Algorithm string
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    ConnectTimeout double
    HashOnHeader string
    The header to use for consistent-hashing.
    LatencyStrategy string
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    ReadTimeout double
    Retries double
    The number of retries to execute upon failure to proxy.
    Slots double
    The number of slots in the load balancer algorithm.
    TokensCountStrategy string
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    WriteTimeout double
    Algorithm string
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    ConnectTimeout float64
    HashOnHeader string
    The header to use for consistent-hashing.
    LatencyStrategy string
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    ReadTimeout float64
    Retries float64
    The number of retries to execute upon failure to proxy.
    Slots float64
    The number of slots in the load balancer algorithm.
    TokensCountStrategy string
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    WriteTimeout float64
    algorithm String
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    connectTimeout Double
    hashOnHeader String
    The header to use for consistent-hashing.
    latencyStrategy String
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    readTimeout Double
    retries Double
    The number of retries to execute upon failure to proxy.
    slots Double
    The number of slots in the load balancer algorithm.
    tokensCountStrategy String
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    writeTimeout Double
    algorithm string
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    connectTimeout number
    hashOnHeader string
    The header to use for consistent-hashing.
    latencyStrategy string
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    readTimeout number
    retries number
    The number of retries to execute upon failure to proxy.
    slots number
    The number of slots in the load balancer algorithm.
    tokensCountStrategy string
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    writeTimeout number
    algorithm str
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    connect_timeout float
    hash_on_header str
    The header to use for consistent-hashing.
    latency_strategy str
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    read_timeout float
    retries float
    The number of retries to execute upon failure to proxy.
    slots float
    The number of slots in the load balancer algorithm.
    tokens_count_strategy str
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    write_timeout float
    algorithm String
    Which load balancing algorithm to use. must be one of ["consistent-hashing", "lowest-latency", "lowest-usage", "round-robin", "semantic"]
    connectTimeout Number
    hashOnHeader String
    The header to use for consistent-hashing.
    latencyStrategy String
    What metrics to use for latency. Available values are: tpot (time-per-output-token) and e2e. must be one of ["e2e", "tpot"]
    readTimeout Number
    retries Number
    The number of retries to execute upon failure to proxy.
    slots Number
    The number of slots in the load balancer algorithm.
    tokensCountStrategy String
    What tokens to use for usage calculation. Available values are: total_tokens, prompt_tokens, and completion_tokens. must be one of ["completion-tokens", "prompt-tokens", "total-tokens"]
    writeTimeout Number
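    As a rough illustration of how these balancer fields combine (shown here as Python dictionary literals using the snake_case names listed above; the header name and numeric values are arbitrary placeholders), consistent-hashing pairs with hash_on_header, while lowest-usage pairs with tokens_count_strategy:

    # Hypothetical balancer blocks; values are placeholders, not recommendations.
    hashing_balancer = {
        "algorithm": "consistent-hashing",
        "hash_on_header": "X-Tenant-Id",  # hypothetical header used as the hash key
        "slots": 10000,
        "retries": 5,
    }

    usage_balancer = {
        "algorithm": "lowest-usage",
        "tokens_count_strategy": "total-tokens",  # weigh targets by total token usage
        "connect_timeout": 60000,
        "read_timeout": 60000,
        "write_timeout": 60000,
    }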

    GatewayPluginAiProxyAdvancedConfigEmbeddings, GatewayPluginAiProxyAdvancedConfigEmbeddingsArgs

    GatewayPluginAiProxyAdvancedConfigEmbeddingsAuth, GatewayPluginAiProxyAdvancedConfigEmbeddingsAuthArgs

    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models.
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models.
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models.
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
    allowOverride boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount boolean
    Use service account auth for GCP-based providers and models.
    headerName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName string
    If AI model requires authentication via query parameter, specify its name here.
    paramValue string
    Specify the full parameter value for 'param_name'.
    allow_override bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    aws_access_key_id str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    aws_secret_access_key str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azure_client_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azure_client_secret str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azure_tenant_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azure_use_managed_identity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcp_service_account_json str
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcp_use_service_account bool
    Use service account auth for GCP-based providers and models.
    header_name str
    If AI model requires authentication via Authorization or API key header, specify its name here.
    header_value str
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    param_location str
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    param_name str
    If AI model requires authentication via query parameter, specify its name here.
    param_value str
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models.
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.

    GatewayPluginAiProxyAdvancedConfigEmbeddingsModel, GatewayPluginAiProxyAdvancedConfigEmbeddingsModelArgs

    Name string
    Model name to execute. Not Null
    Options GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions
    Key/value settings for the model
    Provider string
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]
    Name string
    Model name to execute. Not Null
    Options GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions
    Key/value settings for the model
    Provider string
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]
    name String
    Model name to execute. Not Null
    options GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions
    Key/value settings for the model
    provider String
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]
    name string
    Model name to execute. Not Null
    options GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions
    Key/value settings for the model
    provider string
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]
    name str
    Model name to execute. Not Null
    options GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions
    Key/value settings for the model
    provider str
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]
    name String
    Model name to execute. Not Null
    options Property Map
    Key/value settings for the model
    provider String
    AI provider format to use for embeddings API. Not Null; must be one of ["mistral", "openai"]

    GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptions, GatewayPluginAiProxyAdvancedConfigEmbeddingsModelOptionsArgs

    UpstreamUrl string
    Upstream URL for the embeddings.
    UpstreamUrl string
    Upstream URL for the embeddings.
    upstreamUrl String
    Upstream URL for the embeddings.
    upstreamUrl string
    Upstream URL for the embeddings.
    upstream_url str
    Upstream URL for the embeddings.
    upstreamUrl String
    Upstream URL for the embeddings.
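    Pulling the embeddings auth, model, and options types together, the following is a hypothetical Python dictionary-literal sketch for an OpenAI-format embeddings provider; the model name, header value, and upstream URL are illustrative placeholders:

    # Hypothetical embeddings block: header-based auth plus an OpenAI-format model.
    embeddings = {
        "auth": {
            "header_name": "Authorization",
            "header_value": "Bearer <embeddings-api-key>",  # placeholder secret
            "allow_override": False,
        },
        "model": {
            "provider": "openai",  # must be "mistral" or "openai"
            "name": "text-embedding-3-small",  # example model name
            "options": {
                "upstream_url": "https://api.openai.com/v1/embeddings",  # placeholder upstream
            },
        },
    }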

    GatewayPluginAiProxyAdvancedConfigTarget, GatewayPluginAiProxyAdvancedConfigTargetArgs

    Auth GatewayPluginAiProxyAdvancedConfigTargetAuth
    Description string
    The semantic description of the target, required if using semantic load balancing.
    Logging GatewayPluginAiProxyAdvancedConfigTargetLogging
    Not Null
    Model GatewayPluginAiProxyAdvancedConfigTargetModel
    Not Null
    RouteType string
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    Weight double
    The weight this target gets within the upstream load balancer (1-65535).
    Auth GatewayPluginAiProxyAdvancedConfigTargetAuth
    Description string
    The semantic description of the target, required if using semantic load balancing.
    Logging GatewayPluginAiProxyAdvancedConfigTargetLogging
    Not Null
    Model GatewayPluginAiProxyAdvancedConfigTargetModel
    Not Null
    RouteType string
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    Weight float64
    The weight this target gets within the upstream load balancer (1-65535).
    auth GatewayPluginAiProxyAdvancedConfigTargetAuth
    description String
    The semantic description of the target, required if using semantic load balancing.
    logging GatewayPluginAiProxyAdvancedConfigTargetLogging
    Not Null
    model GatewayPluginAiProxyAdvancedConfigTargetModel
    Not Null
    routeType String
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    weight Double
    The weight this target gets within the upstream load balancer (1-65535).
    auth GatewayPluginAiProxyAdvancedConfigTargetAuth
    description string
    The semantic description of the target, required if using semantic load balancing.
    logging GatewayPluginAiProxyAdvancedConfigTargetLogging
    Not Null
    model GatewayPluginAiProxyAdvancedConfigTargetModel
    Not Null
    routeType string
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    weight number
    The weight this target gets within the upstream load balancer (1-65535).
    auth GatewayPluginAiProxyAdvancedConfigTargetAuth
    description str
    The semantic description of the target, required if using semantic load balancing.
    logging GatewayPluginAiProxyAdvancedConfigTargetLogging
    Not Null
    model GatewayPluginAiProxyAdvancedConfigTargetModel
    Not Null
    route_type str
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    weight float
    The weight this target gets within the upstream loadbalancer (1-65535).
    auth Property Map
    description String
    The semantic description of the target, required if using semantic load balancing.
    logging Property Map
    Not Null
    model Property Map
    Not Null
    routeType String
    The model's operation implementation, for this provider. Set to preserve to pass through without transformation. Not Null; must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
    weight Number
    The weight this target gets within the upstream loadbalancer (1-65535).
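
    A minimal sketch of a single load-balancer target, using the TypeScript field names listed above. The model name, the API key placeholder, and the assumption that such objects are supplied as a list on the plugin config are illustrative only.

    // Sketch only: one target entry for the AI proxy load balancer.
    const openAiChatTarget = {
        routeType: "llm/v1/chat",            // "llm/v1/chat", "llm/v1/completions", or "preserve"
        weight: 100,                         // 1-65535, relative share of traffic
        description: "general purpose chat", // used when semantic load balancing is enabled
        model: {
            provider: "openai",
            name: "gpt-4o",                  // hypothetical model name
        },
        logging: {
            logStatistics: true,             // token/usage metrics into Kong log plugins
            logPayloads: false,              // keep request/response bodies out of logs
        },
        auth: {
            headerName: "Authorization",
            headerValue: "Bearer <OPENAI_API_KEY>", // placeholder secret
        },
    };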

    GatewayPluginAiProxyAdvancedConfigTargetAuth, GatewayPluginAiProxyAdvancedConfigTargetAuthArgs

    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models.
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models.
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models.
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
    allowOverride boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount boolean
    Use service account auth for GCP-based providers and models.
    headerName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName string
    If AI model requires authentication via query parameter, specify its name here.
    paramValue string
    Specify the full parameter value for 'param_name'.
    allow_override bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    aws_access_key_id str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    aws_secret_access_key str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azure_client_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azure_client_secret str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azure_tenant_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azure_use_managed_identity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcp_service_account_json str
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcp_use_service_account bool
    Use service account auth for GCP-based providers and models.
    header_name str
    If AI model requires authentication via Authorization or API key header, specify its name here.
    header_value str
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    param_location str
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    param_name str
    If AI model requires authentication via query parameter, specify its name here.
    param_value str
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models.
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
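
    Two hedged sketches of a target auth block, using the TypeScript field names listed above: header-based auth for a hosted provider, and static IAM credentials for Bedrock. All values are placeholders; in practice the secrets would come from configuration or a secrets manager rather than literals.

    // Sketch only: header/API-key style authentication.
    const headerAuth = {
        headerName: "api-key",
        headerValue: "<PROVIDER_API_KEY>", // placeholder secret
        allowOverride: false,              // callers cannot replace the configured credential
    };

    // Sketch only: static AWS credentials for a Bedrock target.
    const bedrockAuth = {
        awsAccessKeyId: "<AWS_ACCESS_KEY_ID>",         // overrides the env var for this plugin instance
        awsSecretAccessKey: "<AWS_SECRET_ACCESS_KEY>",
    };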

    GatewayPluginAiProxyAdvancedConfigTargetLogging, GatewayPluginAiProxyAdvancedConfigTargetLoggingArgs

    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
    logPayloads boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    logStatistics boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
    log_payloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    log_statistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output.
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
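
    A minimal sketch of a target logging block that keeps token metrics while leaving request and response bodies out of the logs:

    // Sketch only: per-target logging flags.
    const targetLogging = {
        logStatistics: true,  // model usage and token metrics
        logPayloads: false,   // do not log request/response bodies
    };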

    GatewayPluginAiProxyAdvancedConfigTargetModel, GatewayPluginAiProxyAdvancedConfigTargetModelArgs

    Name string
    Model name to execute.
    Options GatewayPluginAiProxyAdvancedConfigTargetModelOptions
    Key/value settings for the model
    Provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    Name string
    Model name to execute.
    Options GatewayPluginAiProxyAdvancedConfigTargetModelOptions
    Key/value settings for the model
    Provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options GatewayPluginAiProxyAdvancedConfigTargetModelOptions
    Key/value settings for the model
    provider String
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name string
    Model name to execute.
    options GatewayPluginAiProxyAdvancedConfigTargetModelOptions
    Key/value settings for the model
    provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name str
    Model name to execute.
    options GatewayPluginAiProxyAdvancedConfigTargetModelOptions
    Key/value settings for the model
    provider str
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options Property Map
    Key/value settings for the model
    provider String
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. Not Null; must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
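
    A minimal sketch of a per-target model block, assuming the Anthropic provider; the model name and option values are illustrative.

    // Sketch only: model block for a target (provider must be one of the values listed above).
    const anthropicModel = {
        provider: "anthropic",
        name: "claude-3-5-sonnet",      // hypothetical model name
        options: {
            anthropicVersion: "2023-06-01",
            maxTokens: 1024,
        },
    };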

    GatewayPluginAiProxyAdvancedConfigTargetModelOptions, GatewayPluginAiProxyAdvancedConfigTargetModelOptionsArgs

    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances.
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock
    Gemini GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini
    Huggingface GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface
    InputCost double
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens double
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost double
    Defines the cost per 1M tokens in the output of the AI.
    Temperature double
    Defines the matching temperature, if using chat or completion models.
    TopK double
    Defines the top-k most likely tokens, if supported.
    TopP double
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances.
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock
    Gemini GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini
    Huggingface GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface
    InputCost float64
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens float64
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost float64
    Defines the cost per 1M tokens in the output of the AI.
    Temperature float64
    Defines the matching temperature, if using chat or completion models.
    TopK float64
    Defines the top-k most likely tokens, if supported.
    TopP float64
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances.
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock
    gemini GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini
    huggingface GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface
    inputCost Double
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Double
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Double
    Defines the cost per 1M tokens in the output of the AI.
    temperature Double
    Defines the matching temperature, if using chat or completion models.
    topK Double
    Defines the top-k most likely tokens, if supported.
    topP Double
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion string
    'api-version' for Azure OpenAI instances.
    azureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    azureInstance string
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock
    gemini GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini
    huggingface GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface
    inputCost number
    Defines the cost per 1M tokens in your prompt.
    llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost number
    Defines the cost per 1M tokens in the output of the AI.
    temperature number
    Defines the matching temperature, if using chat or completion models.
    topK number
    Defines the top-k most likely tokens, if supported.
    topP number
    Defines the top-p probability mass, if supported.
    upstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropic_version str
    Defines the schema/API version, if using Anthropic provider.
    azure_api_version str
    'api-version' for Azure OpenAI instances.
    azure_deployment_id str
    Deployment ID for Azure OpenAI instances.
    azure_instance str
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock
    gemini GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini
    huggingface GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface
    input_cost float
    Defines the cost per 1M tokens in your prompt.
    llama2_format str
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    max_tokens float
    Defines the max_tokens, if using chat or completion models.
    mistral_format str
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    output_cost float
    Defines the cost per 1M tokens in the output of the AI.
    temperature float
    Defines the matching temperature, if using chat or completion models.
    top_k float
    Defines the top-k most likely tokens, if supported.
    top_p float
    Defines the top-p probability mass, if supported.
    upstream_path str
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstream_url str
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances.
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock Property Map
    gemini Property Map
    huggingface Property Map
    inputCost Number
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Number
    Defines the cost per 1M tokens in the output of the AI.
    temperature Number
    Defines the matching temperature, if using chat or completion models.
    topK Number
    Defines the top-k most likely tokens, if supported.
    topP Number
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
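
    Two hedged sketches of model options, using the TypeScript field names listed above: one for an Azure OpenAI deployment, and one showing where the provider-specific sub-blocks (bedrock, gemini, huggingface) described in the sections that follow attach. All identifiers are placeholders.

    // Sketch only: Azure OpenAI deployment plus generation and cost settings.
    const azureModelOptions = {
        azureInstance: "my-openai-instance",
        azureDeploymentId: "gpt-4o-deployment",
        azureApiVersion: "2024-02-01",
        maxTokens: 2048,
        temperature: 0.2,
        inputCost: 2.5,   // cost per 1M prompt tokens
        outputCost: 10,   // cost per 1M output tokens
    };

    // Sketch only: Gemini-on-Vertex settings nested under the gemini sub-block.
    const vertexGeminiOptions = {
        gemini: {
            apiEndpoint: "us-central1-aiplatform.googleapis.com", // hostname only
            projectId: "my-gcp-project",
            locationId: "us-central1",
        },
    };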

    GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrock, GatewayPluginAiProxyAdvancedConfigTargetModelOptionsBedrockArgs

    AwsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    AwsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRegion String
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    aws_region str
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRegion String
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.

    GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGemini, GatewayPluginAiProxyAdvancedConfigTargetModelOptionsGeminiArgs

    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId string
    If running Gemini on Vertex, specify the location ID.
    projectId string
    If running Gemini on Vertex, specify the project ID.
    api_endpoint str
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    location_id str
    If running Gemini on Vertex, specify the location ID.
    project_id str
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.

    GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingface, GatewayPluginAiProxyAdvancedConfigTargetModelOptionsHuggingfaceArgs

    UseCache bool
    Use the cache layer on the inference API
    WaitForModel bool
    Wait for the model if it is not ready
    UseCache bool
    Use the cache layer on the inference API
    WaitForModel bool
    Wait for the model if it is not ready
    useCache Boolean
    Use the cache layer on the inference API
    waitForModel Boolean
    Wait for the model if it is not ready
    useCache boolean
    Use the cache layer on the inference API
    waitForModel boolean
    Wait for the model if it is not ready
    use_cache bool
    Use the cache layer on the inference API
    wait_for_model bool
    Wait for the model if it is not ready
    useCache Boolean
    Use the cache layer on the inference API
    waitForModel Boolean
    Wait for the model if it is not ready

    GatewayPluginAiProxyAdvancedConfigVectordb, GatewayPluginAiProxyAdvancedConfigVectordbArgs

    Dimensions double
    the desired dimensionality for the vectors. Not Null
    DistanceMetric string
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    Redis GatewayPluginAiProxyAdvancedConfigVectordbRedis
    Not Null
    Strategy string
    which vector database driver to use. Not Null; must be "redis"
    Threshold double
    the default similarity threshold for accepting semantic search results (float). Not Null
    Dimensions float64
    the desired dimensionality for the vectors. Not Null
    DistanceMetric string
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    Redis GatewayPluginAiProxyAdvancedConfigVectordbRedis
    Not Null
    Strategy string
    which vector database driver to use. Not Null; must be "redis"
    Threshold float64
    the default similarity threshold for accepting semantic search results (float). Not Null
    dimensions Double
    the desired dimensionality for the vectors. Not Null
    distanceMetric String
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    redis GatewayPluginAiProxyAdvancedConfigVectordbRedis
    Not Null
    strategy String
    which vector database driver to use. Not Null; must be "redis"
    threshold Double
    the default similarity threshold for accepting semantic search results (float). Not Null
    dimensions number
    the desired dimensionality for the vectors. Not Null
    distanceMetric string
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    redis GatewayPluginAiProxyAdvancedConfigVectordbRedis
    Not Null
    strategy string
    which vector database driver to use. Not Null; must be "redis"
    threshold number
    the default similarity threshold for accepting semantic search results (float). Not Null
    dimensions float
    the desired dimensionality for the vectors. Not Null
    distance_metric str
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    redis GatewayPluginAiProxyAdvancedConfigVectordbRedis
    Not Null
    strategy str
    which vector database driver to use. Not Null; must be "redis"
    threshold float
    the default similarity threshold for accepting semantic search results (float). Not Null
    dimensions Number
    the desired dimensionality for the vectors. Not Null
    distanceMetric String
    the distance metric to use for vector searches. Not Null; must be one of ["cosine", "euclidean"]
    redis Property Map
    Not Null
    strategy String
    which vector database driver to use. Not Null; must be "redis"
    threshold Number
    the default similarity threshold for accepting semantic search results (float). Not Null
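
    A minimal sketch of a vectordb block for semantic routing, using the field names above; the dimensions value must match what the configured embeddings model actually returns, and the threshold shown is an arbitrary example.

    // Sketch only: vector database settings (strategy currently must be "redis").
    const vectordb = {
        strategy: "redis",
        dimensions: 1536,              // must match the embeddings model's output size
        distanceMetric: "cosine",      // "cosine" or "euclidean"
        threshold: 0.75,               // minimum similarity for accepting a semantic match
        redis: {
            host: "redis.internal.example",
            port: 6379,
        },
    };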

    GatewayPluginAiProxyAdvancedConfigVectordbRedis, GatewayPluginAiProxyAdvancedConfigVectordbRedisArgs

    ClusterMaxRedirections double
    Maximum retry attempts for redirection.
    ClusterNodes List<GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode>
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    ConnectTimeout double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    ConnectionIsProxied bool
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    Database double
    Database to use for the Redis connection when using the redis strategy
    Host string
    A string representing a host name, such as example.com.
    KeepaliveBacklog double
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    KeepalivePoolSize double
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    Password string
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    Port double
    An integer representing a port number between 0 and 65535, inclusive.
    ReadTimeout double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    SendTimeout double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    SentinelMaster string
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    SentinelNodes List<GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode>
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    SentinelPassword string
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    SentinelRole string
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    SentinelUsername string
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    ServerName string
    A string representing an SNI (server name indication) value for TLS.
    Ssl bool
    If set to true, uses SSL to connect to Redis.
    SslVerify bool
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    Username string
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
    ClusterMaxRedirections float64
    Maximum retry attempts for redirection.
    ClusterNodes []GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    ConnectTimeout float64
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    ConnectionIsProxied bool
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    Database float64
    Database to use for the Redis connection when using the redis strategy
    Host string
    A string representing a host name, such as example.com.
    KeepaliveBacklog float64
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    KeepalivePoolSize float64
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    Password string
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    Port float64
    An integer representing a port number between 0 and 65535, inclusive.
    ReadTimeout float64
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    SendTimeout float64
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    SentinelMaster string
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    SentinelNodes []GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    SentinelPassword string
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    SentinelRole string
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    SentinelUsername string
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    ServerName string
    A string representing an SNI (server name indication) value for TLS.
    Ssl bool
    If set to true, uses SSL to connect to Redis.
    SslVerify bool
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    Username string
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
    clusterMaxRedirections Double
    Maximum retry attempts for redirection.
    clusterNodes List<GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode>
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    connectTimeout Double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    connectionIsProxied Boolean
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    database Double
    Database to use for the Redis connection when using the redis strategy
    host String
    A string representing a host name, such as example.com.
    keepaliveBacklog Double
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    keepalivePoolSize Double
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    password String
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    port Double
    An integer representing a port number between 0 and 65535, inclusive.
    readTimeout Double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sendTimeout Double
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sentinelMaster String
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    sentinelNodes List<GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode>
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    sentinelPassword String
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    sentinelRole String
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    sentinelUsername String
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    serverName String
    A string representing an SNI (server name indication) value for TLS.
    ssl Boolean
    If set to true, uses SSL to connect to Redis.
    sslVerify Boolean
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    username String
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
    clusterMaxRedirections number
    Maximum retry attempts for redirection.
    clusterNodes GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode[]
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    connectTimeout number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    connectionIsProxied boolean
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    database number
    Database to use for the Redis connection when using the redis strategy
    host string
    A string representing a host name, such as example.com.
    keepaliveBacklog number
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    keepalivePoolSize number
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    password string
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    port number
    An integer representing a port number between 0 and 65535, inclusive.
    readTimeout number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sendTimeout number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sentinelMaster string
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    sentinelNodes GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode[]
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    sentinelPassword string
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    sentinelRole string
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    sentinelUsername string
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    serverName string
    A string representing an SNI (server name indication) value for TLS.
    ssl boolean
    If set to true, uses SSL to connect to Redis.
    sslVerify boolean
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    username string
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
    cluster_max_redirections float
    Maximum retry attempts for redirection.
    cluster_nodes Sequence[GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode]
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    connect_timeout float
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    connection_is_proxied bool
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    database float
    Database to use for the Redis connection when using the redis strategy
    host str
    A string representing a host name, such as example.com.
    keepalive_backlog float
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    keepalive_pool_size float
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    password str
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    port float
    An integer representing a port number between 0 and 65535, inclusive.
    read_timeout float
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    send_timeout float
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sentinel_master str
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    sentinel_nodes Sequence[GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode]
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    sentinel_password str
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    sentinel_role str
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    sentinel_username str
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    server_name str
    A string representing an SNI (server name indication) value for TLS.
    ssl bool
    If set to true, uses SSL to connect to Redis.
    ssl_verify bool
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    username str
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
    clusterMaxRedirections Number
    Maximum retry attempts for redirection.
    clusterNodes List<Property Map>
    Cluster addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Cluster. The minimum length of the array is 1 element.
    connectTimeout Number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    connectionIsProxied Boolean
    If the connection to Redis is proxied (e.g. Envoy), set it true. Set the host and port to point to the proxy address.
    database Number
    Database to use for the Redis connection when using the redis strategy
    host String
    A string representing a host name, such as example.com.
    keepaliveBacklog Number
    Limits the total number of opened connections for a pool. If the connection pool is full, connection queues above the limit go into the backlog queue. If the backlog queue is full, subsequent connect operations fail and return nil. Queued operations (subject to set timeouts) resume once the number of connections in the pool is less than keepalive_pool_size. If latency is high or throughput is low, try increasing this value. Empirically, this value is larger than keepalive_pool_size.
    keepalivePoolSize Number
    The size limit for every cosocket connection pool associated with every remote server, per worker process. If neither keepalive_pool_size nor keepalive_backlog is specified, no pool is created. If keepalive_pool_size isn't specified but keepalive_backlog is specified, then the pool uses the default value. Try to increase (e.g. 512) this value if latency is high or throughput is low.
    password String
    Password to use for Redis connections. If undefined, no AUTH commands are sent to Redis.
    port Number
    An integer representing a port number between 0 and 65535, inclusive.
    readTimeout Number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sendTimeout Number
    An integer representing a timeout in milliseconds. Must be between 0 and 2^31-2.
    sentinelMaster String
    Sentinel master to use for Redis connections. Defining this value implies using Redis Sentinel.
    sentinelNodes List<Property Map>
    Sentinel node addresses to use for Redis connections when the redis strategy is defined. Defining this field implies using a Redis Sentinel. The minimum length of the array is 1 element.
    sentinelPassword String
    Sentinel password to authenticate with a Redis Sentinel instance. If undefined, no AUTH commands are sent to Redis Sentinels.
    sentinelRole String
    Sentinel role to use for Redis connections when the redis strategy is defined. Defining this value implies using Redis Sentinel. must be one of ["any", "master", "slave"]
    sentinelUsername String
    Sentinel username to authenticate with a Redis Sentinel instance. If undefined, ACL authentication won't be performed. This requires Redis v6.2.0+.
    serverName String
    A string representing an SNI (server name indication) value for TLS.
    ssl Boolean
    If set to true, uses SSL to connect to Redis.
    sslVerify Boolean
    If set to true, verifies the validity of the server SSL certificate. If setting this parameter, also configure lua_ssl_trusted_certificate in kong.conf to specify the CA (or server) certificate used by your Redis server. You may also need to configure lua_ssl_verify_depth accordingly.
    username String
    Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. To be compatible with Redis v5.x.y, you can set it to default.
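    The properties above describe a single Redis connection block under config.vectordb. As a minimal sketch only, the TypeScript-style object below shows a hypothetical standalone-Redis shape using the camelCase names from this listing; the host, credentials, and timeout values are placeholders, not recommendations, and the same object-literal convention is reused in the sketches that follow.

    // Minimal sketch of a standalone config.vectordb.redis input, using the
    // camelCase property names documented above. All values are placeholders.
    const redisStandalone = {
        host: "redis.internal.example.com",
        port: 6379,
        database: 0,
        connectTimeout: 2000,        // milliseconds, between 0 and 2^31-2
        readTimeout: 2000,
        sendTimeout: 2000,
        username: "default",         // "default" keeps compatibility with Redis v5.x
        password: "example-password",
        ssl: true,
        sslVerify: true,             // also configure lua_ssl_trusted_certificate in kong.conf
        serverName: "redis.internal.example.com",  // SNI value for TLS
        keepalivePoolSize: 256,
        keepaliveBacklog: 512,       // empirically larger than keepalivePoolSize
    };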

    GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNode, GatewayPluginAiProxyAdvancedConfigVectordbRedisClusterNodeArgs

    Ip / ip (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)
    A string representing a host name, such as example.com.
    Port / port (C#: double, Go: float64, Java: Double, Node.js: number, Python: float, YAML: Number)
    An integer representing a port number between 0 and 65535, inclusive.
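    As noted for clusterNodes above, defining one or more cluster node addresses implies Redis Cluster mode. A placeholder-only sketch of that variant:

    // Sketch of a Redis Cluster variant: setting clusterNodes implies cluster mode.
    // At least one node is required; addresses are placeholders.
    const redisCluster = {
        clusterNodes: [
            { ip: "redis-node-1.example.com", port: 6379 },
            { ip: "redis-node-2.example.com", port: 6379 },
            { ip: "redis-node-3.example.com", port: 6379 },
        ],
        clusterMaxRedirections: 5,   // maximum retry attempts for redirection
    };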

    GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNode, GatewayPluginAiProxyAdvancedConfigVectordbRedisSentinelNodeArgs

    Host / host (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)
    A string representing a host name, such as example.com.
    Port / port (C#: double, Go: float64, Java: Double, Node.js: number, Python: float, YAML: Number)
    An integer representing a port number between 0 and 65535, inclusive.
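    Likewise, a hedged sketch of the Sentinel variant: defining sentinelMaster and sentinelNodes implies Redis Sentinel. The master name, role, credentials, and addresses below are placeholders.

    // Sketch of a Redis Sentinel variant. Values are placeholders.
    const redisSentinel = {
        sentinelMaster: "mymaster",
        sentinelRole: "master",              // one of "any", "master", "slave"
        sentinelNodes: [
            { host: "sentinel-1.example.com", port: 26379 },
            { host: "sentinel-2.example.com", port: 26379 },
        ],
        sentinelUsername: "sentinel-user",   // ACL auth requires Redis v6.2.0+
        sentinelPassword: "example-password",
    };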

    GatewayPluginAiProxyAdvancedConsumer, GatewayPluginAiProxyAdvancedConsumerArgs

    Id / id (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)

    GatewayPluginAiProxyAdvancedConsumerGroup, GatewayPluginAiProxyAdvancedConsumerGroupArgs

    Id / id (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)

    GatewayPluginAiProxyAdvancedOrdering, GatewayPluginAiProxyAdvancedOrderingArgs

    GatewayPluginAiProxyAdvancedOrderingAfter, GatewayPluginAiProxyAdvancedOrderingAfterArgs

    Accesses / accesses (C#: List<string>, Go: []string, Java: List<String>, Node.js: string[], Python: Sequence[str], YAML: List<String>)

    GatewayPluginAiProxyAdvancedOrderingBefore, GatewayPluginAiProxyAdvancedOrderingBeforeArgs

    Accesses / accesses (C#: List<string>, Go: []string, Java: List<String>, Node.js: string[], Python: Sequence[str], YAML: List<String>)
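    For illustration, the ordering.after and ordering.before blocks above each take a list of plugin names, which (per Kong's dynamic plugin ordering, an assumption not restated in this listing) controls when this plugin runs in the access phase relative to those plugins. Plugin names below are placeholders.

    // Sketch of the ordering input; plugin names are placeholders.
    const ordering = {
        after: { accesses: ["rate-limiting"] },
        before: { accesses: ["request-transformer"] },
    };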

    GatewayPluginAiProxyAdvancedRoute, GatewayPluginAiProxyAdvancedRouteArgs

    Id / id (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)

    GatewayPluginAiProxyAdvancedService, GatewayPluginAiProxyAdvancedServiceArgs

    Id / id (C#: string, Go: string, Java: String, Node.js: string, Python: str, YAML: String)
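    The consumer, consumer group, route, and service blocks above each carry only the UUID of an existing Konnect entity, which scopes where the plugin runs. A hypothetical sketch with placeholder UUIDs:

    // Sketch of scoping the plugin to existing entities; UUIDs are placeholders.
    const scoping = {
        service: { id: "00000000-0000-0000-0000-000000000001" },
        route: { id: "00000000-0000-0000-0000-000000000002" },
        consumer: { id: "00000000-0000-0000-0000-000000000003" },
    };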

    Import

    $ pulumi import konnect:index/gatewayPluginAiProxyAdvanced:GatewayPluginAiProxyAdvanced my_konnect_gateway_plugin_ai_proxy_advanced "{ \"control_plane_id\": \"9524ec7d-36d9-465d-a8c5-83a3c9390458\", \"plugin_id\": \"3473c251-5b6c-4f45-b1ff-7ede735a366d\"}"
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository: konnect kong/terraform-provider-konnect
    License:
    Notes: This Pulumi package is based on the konnect Terraform Provider.