1. Packages
  2. Airbyte Provider
  3. API Docs
  4. DestinationPgvector
airbyte 0.7.0-beta2 published on Friday, Mar 7, 2025 by airbytehq

airbyte.DestinationPgvector

Explore with Pulumi AI

airbyte logo
airbyte 0.7.0-beta2 published on Friday, Mar 7, 2025 by airbytehq

    DestinationPgvector Resource

    Example Usage

    Coming soon!
    
    Coming soon!
    
    Coming soon!
    
    Coming soon!
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.airbyte.DestinationPgvector;
    import com.pulumi.airbyte.DestinationPgvectorArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingCohereArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingFakeArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingOpenAiArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationIndexingArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationIndexingCredentialsArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs;
    import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var myDestinationPgvector = new DestinationPgvector("myDestinationPgvector", DestinationPgvectorArgs.builder()
                .configuration(DestinationPgvectorConfigurationArgs.builder()
                    .embedding(DestinationPgvectorConfigurationEmbeddingArgs.builder()
                        .azureOpenAi(DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs.builder()
                            .apiBase("https://your-resource-name.openai.azure.com")
                            .deployment("your-resource-name")
                            .openaiKey("...my_openai_key...")
                            .build())
                        .cohere(DestinationPgvectorConfigurationEmbeddingCohereArgs.builder()
                            .cohereKey("...my_cohere_key...")
                            .build())
                        .fake()
                        .openAi(DestinationPgvectorConfigurationEmbeddingOpenAiArgs.builder()
                            .openaiKey("...my_openai_key...")
                            .build())
                        .openAiCompatible(DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs.builder()
                            .apiKey("...my_api_key...")
                            .baseUrl("https://your-service-name.com")
                            .dimensions(1536)
                            .modelName("text-embedding-ada-002")
                            .build())
                        .build())
                    .indexing(DestinationPgvectorConfigurationIndexingArgs.builder()
                        .credentials(DestinationPgvectorConfigurationIndexingCredentialsArgs.builder()
                            .password("AIRBYTE_PASSWORD")
                            .build())
                        .database("AIRBYTE_DATABASE")
                        .defaultSchema("AIRBYTE_SCHEMA")
                        .host("AIRBYTE_ACCOUNT")
                        .port(5432)
                        .username("AIRBYTE_USER")
                        .build())
                    .omitRawText(true)
                    .processing(DestinationPgvectorConfigurationProcessingArgs.builder()
                        .chunkOverlap(7)
                        .chunkSize(8035)
                        .fieldNameMappings(DestinationPgvectorConfigurationProcessingFieldNameMappingArgs.builder()
                            .fromField("...my_from_field...")
                            .toField("...my_to_field...")
                            .build())
                        .metadataFields("...")
                        .textFields("...")
                        .textSplitter(DestinationPgvectorConfigurationProcessingTextSplitterArgs.builder()
                            .byMarkdownHeader(DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs.builder()
                                .splitLevel(5)
                                .build())
                            .byProgrammingLanguage(DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs.builder()
                                .language("js")
                                .build())
                            .bySeparator(DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs.builder()
                                .keepSeparator(false)
                                .separators("...")
                                .build())
                            .build())
                        .build())
                    .build())
                .definitionId("ace91495-b654-40da-a8bd-73a5b3a4b3ee")
                .workspaceId("0b8f211f-70ad-47f2-a6ea-1e915e8005be")
                .build());
    
        }
    }
    
    resources:
      myDestinationPgvector:
        type: airbyte:DestinationPgvector
        properties:
          configuration:
            embedding:
              azureOpenAi:
                apiBase: https://your-resource-name.openai.azure.com
                deployment: your-resource-name
                openaiKey: '...my_openai_key...'
              cohere:
                cohereKey: '...my_cohere_key...'
              fake: {}
              openAi:
                openaiKey: '...my_openai_key...'
              openAiCompatible:
                apiKey: '...my_api_key...'
                baseUrl: https://your-service-name.com
                dimensions: 1536
                modelName: text-embedding-ada-002
            indexing:
              credentials:
                password: AIRBYTE_PASSWORD
              database: AIRBYTE_DATABASE
              defaultSchema: AIRBYTE_SCHEMA
              host: AIRBYTE_ACCOUNT
              port: 5432
              username: AIRBYTE_USER
            omitRawText: true
            processing:
              chunkOverlap: 7
              chunkSize: 8035
              fieldNameMappings:
                - fromField: '...my_from_field...'
                  toField: '...my_to_field...'
              metadataFields:
                - '...'
              textFields:
                - '...'
              textSplitter:
                byMarkdownHeader:
                  splitLevel: 5
                byProgrammingLanguage:
                  language: js
                bySeparator:
                  keepSeparator: false
                  separators:
                    - '...'
          definitionId: ace91495-b654-40da-a8bd-73a5b3a4b3ee
          workspaceId: 0b8f211f-70ad-47f2-a6ea-1e915e8005be
    

    Create DestinationPgvector Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new DestinationPgvector(name: string, args: DestinationPgvectorArgs, opts?: CustomResourceOptions);
    @overload
    def DestinationPgvector(resource_name: str,
                            args: DestinationPgvectorArgs,
                            opts: Optional[ResourceOptions] = None)
    
    @overload
    def DestinationPgvector(resource_name: str,
                            opts: Optional[ResourceOptions] = None,
                            configuration: Optional[DestinationPgvectorConfigurationArgs] = None,
                            workspace_id: Optional[str] = None,
                            definition_id: Optional[str] = None,
                            name: Optional[str] = None)
    func NewDestinationPgvector(ctx *Context, name string, args DestinationPgvectorArgs, opts ...ResourceOption) (*DestinationPgvector, error)
    public DestinationPgvector(string name, DestinationPgvectorArgs args, CustomResourceOptions? opts = null)
    public DestinationPgvector(String name, DestinationPgvectorArgs args)
    public DestinationPgvector(String name, DestinationPgvectorArgs args, CustomResourceOptions options)
    
    type: airbyte:DestinationPgvector
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args DestinationPgvectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args DestinationPgvectorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args DestinationPgvectorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args DestinationPgvectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args DestinationPgvectorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var destinationPgvectorResource = new Airbyte.DestinationPgvector("destinationPgvectorResource", new()
    {
        Configuration = new Airbyte.Inputs.DestinationPgvectorConfigurationArgs
        {
            Embedding = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingArgs
            {
                AzureOpenAi = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs
                {
                    ApiBase = "string",
                    Deployment = "string",
                    OpenaiKey = "string",
                },
                Cohere = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingCohereArgs
                {
                    CohereKey = "string",
                },
                Fake = null,
                OpenAi = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingOpenAiArgs
                {
                    OpenaiKey = "string",
                },
                OpenAiCompatible = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs
                {
                    BaseUrl = "string",
                    Dimensions = 0,
                    ApiKey = "string",
                    ModelName = "string",
                },
            },
            Indexing = new Airbyte.Inputs.DestinationPgvectorConfigurationIndexingArgs
            {
                Credentials = new Airbyte.Inputs.DestinationPgvectorConfigurationIndexingCredentialsArgs
                {
                    Password = "string",
                },
                Database = "string",
                Host = "string",
                Username = "string",
                DefaultSchema = "string",
                Port = 0,
            },
            Processing = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingArgs
            {
                ChunkSize = 0,
                ChunkOverlap = 0,
                FieldNameMappings = new[]
                {
                    new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs
                    {
                        FromField = "string",
                        ToField = "string",
                    },
                },
                MetadataFields = new[]
                {
                    "string",
                },
                TextFields = new[]
                {
                    "string",
                },
                TextSplitter = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterArgs
                {
                    ByMarkdownHeader = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs
                    {
                        SplitLevel = 0,
                    },
                    ByProgrammingLanguage = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs
                    {
                        Language = "string",
                    },
                    BySeparator = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs
                    {
                        KeepSeparator = false,
                        Separators = new[]
                        {
                            "string",
                        },
                    },
                },
            },
            OmitRawText = false,
        },
        WorkspaceId = "string",
        DefinitionId = "string",
        Name = "string",
    });
    
    example, err := airbyte.NewDestinationPgvector(ctx, "destinationPgvectorResource", &airbyte.DestinationPgvectorArgs{
    Configuration: &airbyte.DestinationPgvectorConfigurationArgs{
    Embedding: &airbyte.DestinationPgvectorConfigurationEmbeddingArgs{
    AzureOpenAi: &airbyte.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs{
    ApiBase: pulumi.String("string"),
    Deployment: pulumi.String("string"),
    OpenaiKey: pulumi.String("string"),
    },
    Cohere: &airbyte.DestinationPgvectorConfigurationEmbeddingCohereArgs{
    CohereKey: pulumi.String("string"),
    },
    Fake: &airbyte.DestinationPgvectorConfigurationEmbeddingFakeArgs{
    },
    OpenAi: &airbyte.DestinationPgvectorConfigurationEmbeddingOpenAiArgs{
    OpenaiKey: pulumi.String("string"),
    },
    OpenAiCompatible: &airbyte.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs{
    BaseUrl: pulumi.String("string"),
    Dimensions: pulumi.Float64(0),
    ApiKey: pulumi.String("string"),
    ModelName: pulumi.String("string"),
    },
    },
    Indexing: &airbyte.DestinationPgvectorConfigurationIndexingArgs{
    Credentials: &airbyte.DestinationPgvectorConfigurationIndexingCredentialsArgs{
    Password: pulumi.String("string"),
    },
    Database: pulumi.String("string"),
    Host: pulumi.String("string"),
    Username: pulumi.String("string"),
    DefaultSchema: pulumi.String("string"),
    Port: pulumi.Float64(0),
    },
    Processing: &airbyte.DestinationPgvectorConfigurationProcessingArgs{
    ChunkSize: pulumi.Float64(0),
    ChunkOverlap: pulumi.Float64(0),
    FieldNameMappings: airbyte.DestinationPgvectorConfigurationProcessingFieldNameMappingArray{
    &airbyte.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs{
    FromField: pulumi.String("string"),
    ToField: pulumi.String("string"),
    },
    },
    MetadataFields: pulumi.StringArray{
    pulumi.String("string"),
    },
    TextFields: pulumi.StringArray{
    pulumi.String("string"),
    },
    TextSplitter: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterArgs{
    ByMarkdownHeader: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs{
    SplitLevel: pulumi.Float64(0),
    },
    ByProgrammingLanguage: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs{
    Language: pulumi.String("string"),
    },
    BySeparator: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs{
    KeepSeparator: pulumi.Bool(false),
    Separators: pulumi.StringArray{
    pulumi.String("string"),
    },
    },
    },
    },
    OmitRawText: pulumi.Bool(false),
    },
    WorkspaceId: pulumi.String("string"),
    DefinitionId: pulumi.String("string"),
    Name: pulumi.String("string"),
    })
    
    var destinationPgvectorResource = new DestinationPgvector("destinationPgvectorResource", DestinationPgvectorArgs.builder()
        .configuration(DestinationPgvectorConfigurationArgs.builder()
            .embedding(DestinationPgvectorConfigurationEmbeddingArgs.builder()
                .azureOpenAi(DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs.builder()
                    .apiBase("string")
                    .deployment("string")
                    .openaiKey("string")
                    .build())
                .cohere(DestinationPgvectorConfigurationEmbeddingCohereArgs.builder()
                    .cohereKey("string")
                    .build())
                .fake()
                .openAi(DestinationPgvectorConfigurationEmbeddingOpenAiArgs.builder()
                    .openaiKey("string")
                    .build())
                .openAiCompatible(DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs.builder()
                    .baseUrl("string")
                    .dimensions(0)
                    .apiKey("string")
                    .modelName("string")
                    .build())
                .build())
            .indexing(DestinationPgvectorConfigurationIndexingArgs.builder()
                .credentials(DestinationPgvectorConfigurationIndexingCredentialsArgs.builder()
                    .password("string")
                    .build())
                .database("string")
                .host("string")
                .username("string")
                .defaultSchema("string")
                .port(0)
                .build())
            .processing(DestinationPgvectorConfigurationProcessingArgs.builder()
                .chunkSize(0)
                .chunkOverlap(0)
                .fieldNameMappings(DestinationPgvectorConfigurationProcessingFieldNameMappingArgs.builder()
                    .fromField("string")
                    .toField("string")
                    .build())
                .metadataFields("string")
                .textFields("string")
                .textSplitter(DestinationPgvectorConfigurationProcessingTextSplitterArgs.builder()
                    .byMarkdownHeader(DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs.builder()
                        .splitLevel(0)
                        .build())
                    .byProgrammingLanguage(DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs.builder()
                        .language("string")
                        .build())
                    .bySeparator(DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs.builder()
                        .keepSeparator(false)
                        .separators("string")
                        .build())
                    .build())
                .build())
            .omitRawText(false)
            .build())
        .workspaceId("string")
        .definitionId("string")
        .name("string")
        .build());
    
    destination_pgvector_resource = airbyte.DestinationPgvector("destinationPgvectorResource",
        configuration={
            "embedding": {
                "azure_open_ai": {
                    "api_base": "string",
                    "deployment": "string",
                    "openai_key": "string",
                },
                "cohere": {
                    "cohere_key": "string",
                },
                "fake": {},
                "open_ai": {
                    "openai_key": "string",
                },
                "open_ai_compatible": {
                    "base_url": "string",
                    "dimensions": 0,
                    "api_key": "string",
                    "model_name": "string",
                },
            },
            "indexing": {
                "credentials": {
                    "password": "string",
                },
                "database": "string",
                "host": "string",
                "username": "string",
                "default_schema": "string",
                "port": 0,
            },
            "processing": {
                "chunk_size": 0,
                "chunk_overlap": 0,
                "field_name_mappings": [{
                    "from_field": "string",
                    "to_field": "string",
                }],
                "metadata_fields": ["string"],
                "text_fields": ["string"],
                "text_splitter": {
                    "by_markdown_header": {
                        "split_level": 0,
                    },
                    "by_programming_language": {
                        "language": "string",
                    },
                    "by_separator": {
                        "keep_separator": False,
                        "separators": ["string"],
                    },
                },
            },
            "omit_raw_text": False,
        },
        workspace_id="string",
        definition_id="string",
        name="string")
    
    const destinationPgvectorResource = new airbyte.DestinationPgvector("destinationPgvectorResource", {
        configuration: {
            embedding: {
                azureOpenAi: {
                    apiBase: "string",
                    deployment: "string",
                    openaiKey: "string",
                },
                cohere: {
                    cohereKey: "string",
                },
                fake: {},
                openAi: {
                    openaiKey: "string",
                },
                openAiCompatible: {
                    baseUrl: "string",
                    dimensions: 0,
                    apiKey: "string",
                    modelName: "string",
                },
            },
            indexing: {
                credentials: {
                    password: "string",
                },
                database: "string",
                host: "string",
                username: "string",
                defaultSchema: "string",
                port: 0,
            },
            processing: {
                chunkSize: 0,
                chunkOverlap: 0,
                fieldNameMappings: [{
                    fromField: "string",
                    toField: "string",
                }],
                metadataFields: ["string"],
                textFields: ["string"],
                textSplitter: {
                    byMarkdownHeader: {
                        splitLevel: 0,
                    },
                    byProgrammingLanguage: {
                        language: "string",
                    },
                    bySeparator: {
                        keepSeparator: false,
                        separators: ["string"],
                    },
                },
            },
            omitRawText: false,
        },
        workspaceId: "string",
        definitionId: "string",
        name: "string",
    });
    
    type: airbyte:DestinationPgvector
    properties:
        configuration:
            embedding:
                azureOpenAi:
                    apiBase: string
                    deployment: string
                    openaiKey: string
                cohere:
                    cohereKey: string
                fake: {}
                openAi:
                    openaiKey: string
                openAiCompatible:
                    apiKey: string
                    baseUrl: string
                    dimensions: 0
                    modelName: string
            indexing:
                credentials:
                    password: string
                database: string
                defaultSchema: string
                host: string
                port: 0
                username: string
            omitRawText: false
            processing:
                chunkOverlap: 0
                chunkSize: 0
                fieldNameMappings:
                    - fromField: string
                      toField: string
                metadataFields:
                    - string
                textFields:
                    - string
                textSplitter:
                    byMarkdownHeader:
                        splitLevel: 0
                    byProgrammingLanguage:
                        language: string
                    bySeparator:
                        keepSeparator: false
                        separators:
                            - string
        definitionId: string
        name: string
        workspaceId: string
    

    DestinationPgvector Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The DestinationPgvector resource accepts the following input properties:

    Configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    WorkspaceId string
    DefinitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    Name string
    Name of the destination e.g. dev-mysql-instance.
    Configuration DestinationPgvectorConfigurationArgs
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    WorkspaceId string
    DefinitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    Name string
    Name of the destination e.g. dev-mysql-instance.
    configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    workspaceId String
    definitionId String
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    name String
    Name of the destination e.g. dev-mysql-instance.
    configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    workspaceId string
    definitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    name string
    Name of the destination e.g. dev-mysql-instance.
    configuration DestinationPgvectorConfigurationArgs
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    workspace_id str
    definition_id str
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    name str
    Name of the destination e.g. dev-mysql-instance.
    configuration Property Map
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    workspaceId String
    definitionId String
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    name String
    Name of the destination e.g. dev-mysql-instance.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the DestinationPgvector resource produces the following output properties:

    CreatedAt double
    DestinationId string
    DestinationType string
    Id string
    The provider-assigned unique ID for this managed resource.
    CreatedAt float64
    DestinationId string
    DestinationType string
    Id string
    The provider-assigned unique ID for this managed resource.
    createdAt Double
    destinationId String
    destinationType String
    id String
    The provider-assigned unique ID for this managed resource.
    createdAt number
    destinationId string
    destinationType string
    id string
    The provider-assigned unique ID for this managed resource.
    created_at float
    destination_id str
    destination_type str
    id str
    The provider-assigned unique ID for this managed resource.
    createdAt Number
    destinationId String
    destinationType String
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing DestinationPgvector Resource

    Get an existing DestinationPgvector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: DestinationPgvectorState, opts?: CustomResourceOptions): DestinationPgvector
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            configuration: Optional[DestinationPgvectorConfigurationArgs] = None,
            created_at: Optional[float] = None,
            definition_id: Optional[str] = None,
            destination_id: Optional[str] = None,
            destination_type: Optional[str] = None,
            name: Optional[str] = None,
            workspace_id: Optional[str] = None) -> DestinationPgvector
    func GetDestinationPgvector(ctx *Context, name string, id IDInput, state *DestinationPgvectorState, opts ...ResourceOption) (*DestinationPgvector, error)
    public static DestinationPgvector Get(string name, Input<string> id, DestinationPgvectorState? state, CustomResourceOptions? opts = null)
    public static DestinationPgvector get(String name, Output<String> id, DestinationPgvectorState state, CustomResourceOptions options)
    resources:
      _:
        type: airbyte:DestinationPgvector
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    CreatedAt double
    DefinitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    DestinationId string
    DestinationType string
    Name string
    Name of the destination e.g. dev-mysql-instance.
    WorkspaceId string
    Configuration DestinationPgvectorConfigurationArgs
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    CreatedAt float64
    DefinitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    DestinationId string
    DestinationType string
    Name string
    Name of the destination e.g. dev-mysql-instance.
    WorkspaceId string
    configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    createdAt Double
    definitionId String
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    destinationId String
    destinationType String
    name String
    Name of the destination e.g. dev-mysql-instance.
    workspaceId String
    configuration DestinationPgvectorConfiguration
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    createdAt number
    definitionId string
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    destinationId string
    destinationType string
    name string
    Name of the destination e.g. dev-mysql-instance.
    workspaceId string
    configuration DestinationPgvectorConfigurationArgs
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    created_at float
    definition_id str
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    destination_id str
    destination_type str
    name str
    Name of the destination e.g. dev-mysql-instance.
    workspace_id str
    configuration Property Map
    The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
    createdAt Number
    definitionId String
    The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
    destinationId String
    destinationType String
    name String
    Name of the destination e.g. dev-mysql-instance.
    workspaceId String

    Supporting Types

    DestinationPgvectorConfiguration, DestinationPgvectorConfigurationArgs

    Embedding DestinationPgvectorConfigurationEmbedding
    Embedding configuration
    Indexing DestinationPgvectorConfigurationIndexing
    Postgres can be used to store vector data and retrieve embeddings.
    Processing DestinationPgvectorConfigurationProcessing
    OmitRawText bool
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
    Embedding DestinationPgvectorConfigurationEmbedding
    Embedding configuration
    Indexing DestinationPgvectorConfigurationIndexing
    Postgres can be used to store vector data and retrieve embeddings.
    Processing DestinationPgvectorConfigurationProcessing
    OmitRawText bool
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
    embedding DestinationPgvectorConfigurationEmbedding
    Embedding configuration
    indexing DestinationPgvectorConfigurationIndexing
    Postgres can be used to store vector data and retrieve embeddings.
    processing DestinationPgvectorConfigurationProcessing
    omitRawText Boolean
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
    embedding DestinationPgvectorConfigurationEmbedding
    Embedding configuration
    indexing DestinationPgvectorConfigurationIndexing
    Postgres can be used to store vector data and retrieve embeddings.
    processing DestinationPgvectorConfigurationProcessing
    omitRawText boolean
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
    embedding DestinationPgvectorConfigurationEmbedding
    Embedding configuration
    indexing DestinationPgvectorConfigurationIndexing
    Postgres can be used to store vector data and retrieve embeddings.
    processing DestinationPgvectorConfigurationProcessing
    omit_raw_text bool
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
    embedding Property Map
    Embedding configuration
    indexing Property Map
    Postgres can be used to store vector data and retrieve embeddings.
    processing Property Map
    omitRawText Boolean
    Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false

    DestinationPgvectorConfigurationEmbedding, DestinationPgvectorConfigurationEmbeddingArgs

    AzureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    Cohere DestinationPgvectorConfigurationEmbeddingCohere
    Use the Cohere API to embed text.
    Fake DestinationPgvectorConfigurationEmbeddingFake
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    OpenAi DestinationPgvectorConfigurationEmbeddingOpenAi
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    OpenAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible
    Use a service that's compatible with the OpenAI API to embed text.
    AzureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    Cohere DestinationPgvectorConfigurationEmbeddingCohere
    Use the Cohere API to embed text.
    Fake DestinationPgvectorConfigurationEmbeddingFake
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    OpenAi DestinationPgvectorConfigurationEmbeddingOpenAi
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    OpenAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible
    Use a service that's compatible with the OpenAI API to embed text.
    azureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    cohere DestinationPgvectorConfigurationEmbeddingCohere
    Use the Cohere API to embed text.
    fake DestinationPgvectorConfigurationEmbeddingFake
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    openAi DestinationPgvectorConfigurationEmbeddingOpenAi
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    openAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible
    Use a service that's compatible with the OpenAI API to embed text.
    azureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    cohere DestinationPgvectorConfigurationEmbeddingCohere
    Use the Cohere API to embed text.
    fake DestinationPgvectorConfigurationEmbeddingFake
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    openAi DestinationPgvectorConfigurationEmbeddingOpenAi
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    openAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible
    Use a service that's compatible with the OpenAI API to embed text.
    azure_open_ai DestinationPgvectorConfigurationEmbeddingAzureOpenAi
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    cohere DestinationPgvectorConfigurationEmbeddingCohere
    Use the Cohere API to embed text.
    fake DestinationPgvectorConfigurationEmbeddingFake
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    open_ai DestinationPgvectorConfigurationEmbeddingOpenAi
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    open_ai_compatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible
    Use a service that's compatible with the OpenAI API to embed text.
    azureOpenAi Property Map
    Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    cohere Property Map
    Use the Cohere API to embed text.
    fake Property Map
    Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
    openAi Property Map
    Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
    openAiCompatible Property Map
    Use a service that's compatible with the OpenAI API to embed text.

    DestinationPgvectorConfigurationEmbeddingAzureOpenAi, DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs

    ApiBase string
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    Deployment string
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    OpenaiKey string
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    ApiBase string
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    Deployment string
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    OpenaiKey string
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    apiBase String
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    deployment String
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    openaiKey String
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    apiBase string
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    deployment string
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    openaiKey string
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    api_base str
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    deployment str
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    openai_key str
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    apiBase String
    The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    deployment String
    The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
    openaiKey String
    The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource

    DestinationPgvectorConfigurationEmbeddingCohere, DestinationPgvectorConfigurationEmbeddingCohereArgs

    CohereKey string
    CohereKey string
    cohereKey String
    cohereKey string
    cohereKey String

    DestinationPgvectorConfigurationEmbeddingOpenAi, DestinationPgvectorConfigurationEmbeddingOpenAiArgs

    OpenaiKey string
    OpenaiKey string
    openaiKey String
    openaiKey string
    openaiKey String

    DestinationPgvectorConfigurationEmbeddingOpenAiCompatible, DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs

    BaseUrl string
    The base URL for your OpenAI-compatible service
    Dimensions double
    The number of dimensions the embedding model is generating
    ApiKey string
    Default: ""
    ModelName string
    The name of the model to use for embedding. Default: "text-embedding-ada-002"
    BaseUrl string
    The base URL for your OpenAI-compatible service
    Dimensions float64
    The number of dimensions the embedding model is generating
    ApiKey string
    Default: ""
    ModelName string
    The name of the model to use for embedding. Default: "text-embedding-ada-002"
    baseUrl String
    The base URL for your OpenAI-compatible service
    dimensions Double
    The number of dimensions the embedding model is generating
    apiKey String
    Default: ""
    modelName String
    The name of the model to use for embedding. Default: "text-embedding-ada-002"
    baseUrl string
    The base URL for your OpenAI-compatible service
    dimensions number
    The number of dimensions the embedding model is generating
    apiKey string
    Default: ""
    modelName string
    The name of the model to use for embedding. Default: "text-embedding-ada-002"
    base_url str
    The base URL for your OpenAI-compatible service
    dimensions float
    The number of dimensions the embedding model is generating
    api_key str
    Default: ""
    model_name str
    The name of the model to use for embedding. Default: "text-embedding-ada-002"
    baseUrl String
    The base URL for your OpenAI-compatible service
    dimensions Number
    The number of dimensions the embedding model is generating
    apiKey String
    Default: ""
    modelName String
    The name of the model to use for embedding. Default: "text-embedding-ada-002"

    DestinationPgvectorConfigurationIndexing, DestinationPgvectorConfigurationIndexingArgs

    Credentials DestinationPgvectorConfigurationIndexingCredentials
    Database string
    Enter the name of the database that you want to sync data into
    Host string
    Enter the host name you want to use to access the database.
    Username string
    Enter the name of the user you want to use to access the database
    DefaultSchema string
    Enter the name of the default schema. Default: "public"
    Port double
    Enter the port you want to use to access the database. Default: 5432
    Credentials DestinationPgvectorConfigurationIndexingCredentials
    Database string
    Enter the name of the database that you want to sync data into
    Host string
    Enter the host name you want to use to access the database.
    Username string
    Enter the name of the user you want to use to access the database
    DefaultSchema string
    Enter the name of the default schema. Default: "public"
    Port float64
    Enter the port you want to use to access the database. Default: 5432
    credentials DestinationPgvectorConfigurationIndexingCredentials
    database String
    Enter the name of the database that you want to sync data into
    host String
    Enter the host name you want to use to access the database.
    username String
    Enter the name of the user you want to use to access the database
    defaultSchema String
    Enter the name of the default schema. Default: "public"
    port Double
    Enter the port you want to use to access the database. Default: 5432
    credentials DestinationPgvectorConfigurationIndexingCredentials
    database string
    Enter the name of the database that you want to sync data into
    host string
    Enter the host name you want to use to access the database.
    username string
    Enter the name of the user you want to use to access the database
    defaultSchema string
    Enter the name of the default schema. Default: "public"
    port number
    Enter the port you want to use to access the database. Default: 5432
    credentials DestinationPgvectorConfigurationIndexingCredentials
    database str
    Enter the name of the database that you want to sync data into
    host str
    Enter the host name you want to use to access the database.
    username str
    Enter the name of the user you want to use to access the database
    default_schema str
    Enter the name of the default schema. Default: "public"
    port float
    Enter the port you want to use to access the database. Default: 5432
    credentials Property Map
    database String
    Enter the name of the database that you want to sync data into
    host String
    Enter the host name you want to use to access the database.
    username String
    Enter the name of the user you want to use to access the database
    defaultSchema String
    Enter the name of the default schema. Default: "public"
    port Number
    Enter the port you want to use to access the database. Default: 5432

    DestinationPgvectorConfigurationIndexingCredentials, DestinationPgvectorConfigurationIndexingCredentialsArgs

    Password string
    Enter the password you want to use to access the database
    Password string
    Enter the password you want to use to access the database
    password String
    Enter the password you want to use to access the database
    password string
    Enter the password you want to use to access the database
    password str
    Enter the password you want to use to access the database
    password String
    Enter the password you want to use to access the database

    DestinationPgvectorConfigurationProcessing, DestinationPgvectorConfigurationProcessingArgs

    ChunkSize double
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    ChunkOverlap double
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    FieldNameMappings List<DestinationPgvectorConfigurationProcessingFieldNameMapping>
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    MetadataFields List<string>
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    TextFields List<string>
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    TextSplitter DestinationPgvectorConfigurationProcessingTextSplitter
    Split text fields into chunks based on the specified method.
    ChunkSize float64
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    ChunkOverlap float64
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    FieldNameMappings []DestinationPgvectorConfigurationProcessingFieldNameMapping
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    MetadataFields []string
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    TextFields []string
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    TextSplitter DestinationPgvectorConfigurationProcessingTextSplitter
    Split text fields into chunks based on the specified method.
    chunkSize Double
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    chunkOverlap Double
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    fieldNameMappings List<DestinationPgvectorConfigurationProcessingFieldNameMapping>
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    metadataFields List<String>
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    textFields List<String>
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    textSplitter DestinationPgvectorConfigurationProcessingTextSplitter
    Split text fields into chunks based on the specified method.
    chunkSize number
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    chunkOverlap number
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    fieldNameMappings DestinationPgvectorConfigurationProcessingFieldNameMapping[]
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    metadataFields string[]
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    textFields string[]
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    textSplitter DestinationPgvectorConfigurationProcessingTextSplitter
    Split text fields into chunks based on the specified method.
    chunk_size float
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    chunk_overlap float
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    field_name_mappings Sequence[DestinationPgvectorConfigurationProcessingFieldNameMapping]
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    metadata_fields Sequence[str]
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    text_fields Sequence[str]
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    text_splitter DestinationPgvectorConfigurationProcessingTextSplitter
    Split text fields into chunks based on the specified method.
    chunkSize Number
    Size of chunks in tokens to store in the vector store (make sure it is not too big for the context window of your LLM)
    chunkOverlap Number
    Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
    fieldNameMappings List<Property Map>
    List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
    metadataFields List<String>
    List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
    textFields List<String>
    List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all name fields in all entries of the users array.
    textSplitter Property Map
    Split text fields into chunks based on the specified method.

    DestinationPgvectorConfigurationProcessingFieldNameMapping, DestinationPgvectorConfigurationProcessingFieldNameMappingArgs

    FromField string
    The field name in the source
    ToField string
    The field name to use in the destination
    FromField string
    The field name in the source
    ToField string
    The field name to use in the destination
    fromField String
    The field name in the source
    toField String
    The field name to use in the destination
    fromField string
    The field name in the source
    toField string
    The field name to use in the destination
    from_field str
    The field name in the source
    to_field str
    The field name to use in the destination
    fromField String
    The field name in the source
    toField String
    The field name to use in the destination

    DestinationPgvectorConfigurationProcessingTextSplitter, DestinationPgvectorConfigurationProcessingTextSplitterArgs

    ByMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    ByProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    BySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
    ByMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    ByProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    BySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
    byMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    byProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    bySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
    byMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    byProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    bySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
    by_markdown_header DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    by_programming_language DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    by_separator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
    byMarkdownHeader Property Map
    Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
    byProgrammingLanguage Property Map
    Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
    bySeparator Property Map
    Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.

    DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader, DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs

    SplitLevel double
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
    SplitLevel float64
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
    splitLevel Double
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
    splitLevel number
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
    split_level float
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
    splitLevel Number
    Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1

    DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage, DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs

    Language string
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
    Language string
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
    language String
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
    language string
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
    language str
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
    language String
    Split code in suitable places based on the programming language. Must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]

    DestinationPgvectorConfigurationProcessingTextSplitterBySeparator, DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs

    KeepSeparator bool
    Whether to keep the separator in the resulting chunks. Default: false
    Separators List<string>
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
    KeepSeparator bool
    Whether to keep the separator in the resulting chunks. Default: false
    Separators []string
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
    keepSeparator Boolean
    Whether to keep the separator in the resulting chunks. Default: false
    separators List<String>
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
    keepSeparator boolean
    Whether to keep the separator in the resulting chunks. Default: false
    separators string[]
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
    keep_separator bool
    Whether to keep the separator in the resulting chunks. Default: false
    separators Sequence[str]
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
    keepSeparator Boolean
    Whether to keep the separator in the resulting chunks. Default: false
    separators List<String>
    List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".

    Import

    $ pulumi import airbyte:index/destinationPgvector:DestinationPgvector my_airbyte_destination_pgvector ""
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    airbyte airbytehq/terraform-provider-airbyte
    License
    Notes
    This Pulumi package is based on the airbyte Terraform Provider.
    airbyte logo
    airbyte 0.7.0-beta2 published on Friday, Mar 7, 2025 by airbytehq