airbyte.DestinationPgvector
Explore with Pulumi AI
DestinationPgvector Resource
Example Usage
Coming soon!
Coming soon!
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.airbyte.DestinationPgvector;
import com.pulumi.airbyte.DestinationPgvectorArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingCohereArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingFakeArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingOpenAiArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationIndexingArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationIndexingCredentialsArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs;
import com.pulumi.airbyte.inputs.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    /**
     * Declares an example Airbyte pgvector destination with the embedding,
     * indexing and processing sections of the configuration populated.
     */
    public static void stack(Context ctx) {
        var myDestinationPgvector = new DestinationPgvector("myDestinationPgvector", DestinationPgvectorArgs.builder()
            .configuration(DestinationPgvectorConfigurationArgs.builder()
                .embedding(DestinationPgvectorConfigurationEmbeddingArgs.builder()
                    .azureOpenAi(DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs.builder()
                        .apiBase("https://your-resource-name.openai.azure.com")
                        .deployment("your-resource-name")
                        .openaiKey("...my_openai_key...")
                        .build())
                    .cohere(DestinationPgvectorConfigurationEmbeddingCohereArgs.builder()
                        .cohereKey("...my_cohere_key...")
                        .build())
                    // FIX: the fake(...) setter takes a FakeArgs value; the original
                    // zero-argument `.fake()` call does not exist on the builder.
                    .fake(DestinationPgvectorConfigurationEmbeddingFakeArgs.builder().build())
                    .openAi(DestinationPgvectorConfigurationEmbeddingOpenAiArgs.builder()
                        .openaiKey("...my_openai_key...")
                        .build())
                    .openAiCompatible(DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs.builder()
                        .apiKey("...my_api_key...")
                        .baseUrl("https://your-service-name.com")
                        .dimensions(1536)
                        .modelName("text-embedding-ada-002")
                        .build())
                    .build())
                .indexing(DestinationPgvectorConfigurationIndexingArgs.builder()
                    .credentials(DestinationPgvectorConfigurationIndexingCredentialsArgs.builder()
                        .password("AIRBYTE_PASSWORD")
                        .build())
                    .database("AIRBYTE_DATABASE")
                    .defaultSchema("AIRBYTE_SCHEMA")
                    .host("AIRBYTE_ACCOUNT")
                    .port(5432)
                    .username("AIRBYTE_USER")
                    .build())
                // FIX: Java builder setters are camelCase; `omit_raw_text` is the
                // Python/snake_case spelling and does not compile here.
                .omitRawText(true)
                .processing(DestinationPgvectorConfigurationProcessingArgs.builder()
                    .chunkOverlap(7)
                    .chunkSize(8035)
                    .fieldNameMappings(DestinationPgvectorConfigurationProcessingFieldNameMappingArgs.builder()
                        .fromField("...my_from_field...")
                        .toField("...my_to_field...")
                        .build())
                    .metadataFields("...")
                    .textFields("...")
                    .textSplitter(DestinationPgvectorConfigurationProcessingTextSplitterArgs.builder()
                        .byMarkdownHeader(DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs.builder()
                            .splitLevel(5)
                            .build())
                        .byProgrammingLanguage(DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs.builder()
                            .language("js")
                            .build())
                        .bySeparator(DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs.builder()
                            .keepSeparator(false)
                            .separators("...")
                            .build())
                        .build())
                    .build())
                .build())
            .definitionId("ace91495-b654-40da-a8bd-73a5b3a4b3ee")
            .workspaceId("0b8f211f-70ad-47f2-a6ea-1e915e8005be")
            .build());
    }
}
resources:
myDestinationPgvector:
type: airbyte:DestinationPgvector
properties:
configuration:
embedding:
azureOpenAi:
apiBase: https://your-resource-name.openai.azure.com
deployment: your-resource-name
openaiKey: '...my_openai_key...'
cohere:
cohereKey: '...my_cohere_key...'
fake: {}
openAi:
openaiKey: '...my_openai_key...'
openAiCompatible:
apiKey: '...my_api_key...'
baseUrl: https://your-service-name.com
dimensions: 1536
modelName: text-embedding-ada-002
indexing:
credentials:
password: AIRBYTE_PASSWORD
database: AIRBYTE_DATABASE
defaultSchema: AIRBYTE_SCHEMA
host: AIRBYTE_ACCOUNT
port: 5432
username: AIRBYTE_USER
omitRawText: true
processing:
chunkOverlap: 7
chunkSize: 8035
fieldNameMappings:
- fromField: '...my_from_field...'
toField: '...my_to_field...'
metadataFields:
- '...'
textFields:
- '...'
textSplitter:
byMarkdownHeader:
splitLevel: 5
byProgrammingLanguage:
language: js
bySeparator:
keepSeparator: false
separators:
- '...'
definitionId: ace91495-b654-40da-a8bd-73a5b3a4b3ee
workspaceId: 0b8f211f-70ad-47f2-a6ea-1e915e8005be
Create DestinationPgvector Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DestinationPgvector(name: string, args: DestinationPgvectorArgs, opts?: CustomResourceOptions);
@overload
def DestinationPgvector(resource_name: str,
args: DestinationPgvectorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def DestinationPgvector(resource_name: str,
opts: Optional[ResourceOptions] = None,
configuration: Optional[DestinationPgvectorConfigurationArgs] = None,
workspace_id: Optional[str] = None,
definition_id: Optional[str] = None,
name: Optional[str] = None)
func NewDestinationPgvector(ctx *Context, name string, args DestinationPgvectorArgs, opts ...ResourceOption) (*DestinationPgvector, error)
public DestinationPgvector(string name, DestinationPgvectorArgs args, CustomResourceOptions? opts = null)
public DestinationPgvector(String name, DestinationPgvectorArgs args)
public DestinationPgvector(String name, DestinationPgvectorArgs args, CustomResourceOptions options)
type: airbyte:DestinationPgvector
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DestinationPgvectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DestinationPgvectorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DestinationPgvectorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DestinationPgvectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DestinationPgvectorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var destinationPgvectorResource = new Airbyte.DestinationPgvector("destinationPgvectorResource", new()
{
Configuration = new Airbyte.Inputs.DestinationPgvectorConfigurationArgs
{
Embedding = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingArgs
{
AzureOpenAi = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs
{
ApiBase = "string",
Deployment = "string",
OpenaiKey = "string",
},
Cohere = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingCohereArgs
{
CohereKey = "string",
},
Fake = null,
OpenAi = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingOpenAiArgs
{
OpenaiKey = "string",
},
OpenAiCompatible = new Airbyte.Inputs.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs
{
BaseUrl = "string",
Dimensions = 0,
ApiKey = "string",
ModelName = "string",
},
},
Indexing = new Airbyte.Inputs.DestinationPgvectorConfigurationIndexingArgs
{
Credentials = new Airbyte.Inputs.DestinationPgvectorConfigurationIndexingCredentialsArgs
{
Password = "string",
},
Database = "string",
Host = "string",
Username = "string",
DefaultSchema = "string",
Port = 0,
},
Processing = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingArgs
{
ChunkSize = 0,
ChunkOverlap = 0,
FieldNameMappings = new[]
{
new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs
{
FromField = "string",
ToField = "string",
},
},
MetadataFields = new[]
{
"string",
},
TextFields = new[]
{
"string",
},
TextSplitter = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterArgs
{
ByMarkdownHeader = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs
{
SplitLevel = 0,
},
ByProgrammingLanguage = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs
{
Language = "string",
},
BySeparator = new Airbyte.Inputs.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs
{
KeepSeparator = false,
Separators = new[]
{
"string",
},
},
},
},
OmitRawText = false,
},
WorkspaceId = "string",
DefinitionId = "string",
Name = "string",
});
// Reference example: every nested args literal must carry the `airbyte`
// package qualifier (the original rendered as the invalid `&.TypeName{...}`).
example, err := airbyte.NewDestinationPgvector(ctx, "destinationPgvectorResource", &airbyte.DestinationPgvectorArgs{
	Configuration: &airbyte.DestinationPgvectorConfigurationArgs{
		Embedding: &airbyte.DestinationPgvectorConfigurationEmbeddingArgs{
			AzureOpenAi: &airbyte.DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs{
				ApiBase:    pulumi.String("string"),
				Deployment: pulumi.String("string"),
				OpenaiKey:  pulumi.String("string"),
			},
			Cohere: &airbyte.DestinationPgvectorConfigurationEmbeddingCohereArgs{
				CohereKey: pulumi.String("string"),
			},
			Fake: &airbyte.DestinationPgvectorConfigurationEmbeddingFakeArgs{},
			OpenAi: &airbyte.DestinationPgvectorConfigurationEmbeddingOpenAiArgs{
				OpenaiKey: pulumi.String("string"),
			},
			OpenAiCompatible: &airbyte.DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs{
				BaseUrl:    pulumi.String("string"),
				Dimensions: pulumi.Float64(0),
				ApiKey:     pulumi.String("string"),
				ModelName:  pulumi.String("string"),
			},
		},
		Indexing: &airbyte.DestinationPgvectorConfigurationIndexingArgs{
			Credentials: &airbyte.DestinationPgvectorConfigurationIndexingCredentialsArgs{
				Password: pulumi.String("string"),
			},
			Database:      pulumi.String("string"),
			Host:          pulumi.String("string"),
			Username:      pulumi.String("string"),
			DefaultSchema: pulumi.String("string"),
			Port:          pulumi.Float64(0),
		},
		Processing: &airbyte.DestinationPgvectorConfigurationProcessingArgs{
			ChunkSize:    pulumi.Float64(0),
			ChunkOverlap: pulumi.Float64(0),
			FieldNameMappings: airbyte.DestinationPgvectorConfigurationProcessingFieldNameMappingArray{
				&airbyte.DestinationPgvectorConfigurationProcessingFieldNameMappingArgs{
					FromField: pulumi.String("string"),
					ToField:   pulumi.String("string"),
				},
			},
			MetadataFields: pulumi.StringArray{
				pulumi.String("string"),
			},
			TextFields: pulumi.StringArray{
				pulumi.String("string"),
			},
			TextSplitter: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterArgs{
				ByMarkdownHeader: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs{
					SplitLevel: pulumi.Float64(0),
				},
				ByProgrammingLanguage: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs{
					Language: pulumi.String("string"),
				},
				BySeparator: &airbyte.DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs{
					KeepSeparator: pulumi.Bool(false),
					Separators: pulumi.StringArray{
						pulumi.String("string"),
					},
				},
			},
		},
		OmitRawText: pulumi.Bool(false),
	},
	WorkspaceId:  pulumi.String("string"),
	DefinitionId: pulumi.String("string"),
	Name:         pulumi.String("string"),
})
// Reference example using placeholder values for all input properties.
var destinationPgvectorResource = new DestinationPgvector("destinationPgvectorResource", DestinationPgvectorArgs.builder()
    .configuration(DestinationPgvectorConfigurationArgs.builder()
        .embedding(DestinationPgvectorConfigurationEmbeddingArgs.builder()
            .azureOpenAi(DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs.builder()
                .apiBase("string")
                .deployment("string")
                .openaiKey("string")
                .build())
            .cohere(DestinationPgvectorConfigurationEmbeddingCohereArgs.builder()
                .cohereKey("string")
                .build())
            // FIX: fake(...) takes a FakeArgs value; `.fake()` with no argument
            // does not exist on the builder.
            .fake(DestinationPgvectorConfigurationEmbeddingFakeArgs.builder().build())
            .openAi(DestinationPgvectorConfigurationEmbeddingOpenAiArgs.builder()
                .openaiKey("string")
                .build())
            .openAiCompatible(DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs.builder()
                .baseUrl("string")
                .dimensions(0)
                .apiKey("string")
                .modelName("string")
                .build())
            .build())
        .indexing(DestinationPgvectorConfigurationIndexingArgs.builder()
            .credentials(DestinationPgvectorConfigurationIndexingCredentialsArgs.builder()
                .password("string")
                .build())
            .database("string")
            .host("string")
            .username("string")
            .defaultSchema("string")
            .port(0)
            .build())
        .processing(DestinationPgvectorConfigurationProcessingArgs.builder()
            .chunkSize(0)
            .chunkOverlap(0)
            .fieldNameMappings(DestinationPgvectorConfigurationProcessingFieldNameMappingArgs.builder()
                .fromField("string")
                .toField("string")
                .build())
            .metadataFields("string")
            .textFields("string")
            .textSplitter(DestinationPgvectorConfigurationProcessingTextSplitterArgs.builder()
                .byMarkdownHeader(DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs.builder()
                    .splitLevel(0)
                    .build())
                .byProgrammingLanguage(DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs.builder()
                    .language("string")
                    .build())
                .bySeparator(DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs.builder()
                    .keepSeparator(false)
                    .separators("string")
                    .build())
                .build())
            .build())
        .omitRawText(false)
        .build())
    .workspaceId("string")
    .definitionId("string")
    .name("string")
    .build());
destination_pgvector_resource = airbyte.DestinationPgvector("destinationPgvectorResource",
configuration={
"embedding": {
"azure_open_ai": {
"api_base": "string",
"deployment": "string",
"openai_key": "string",
},
"cohere": {
"cohere_key": "string",
},
"fake": {},
"open_ai": {
"openai_key": "string",
},
"open_ai_compatible": {
"base_url": "string",
"dimensions": 0,
"api_key": "string",
"model_name": "string",
},
},
"indexing": {
"credentials": {
"password": "string",
},
"database": "string",
"host": "string",
"username": "string",
"default_schema": "string",
"port": 0,
},
"processing": {
"chunk_size": 0,
"chunk_overlap": 0,
"field_name_mappings": [{
"from_field": "string",
"to_field": "string",
}],
"metadata_fields": ["string"],
"text_fields": ["string"],
"text_splitter": {
"by_markdown_header": {
"split_level": 0,
},
"by_programming_language": {
"language": "string",
},
"by_separator": {
"keep_separator": False,
"separators": ["string"],
},
},
},
"omit_raw_text": False,
},
workspace_id="string",
definition_id="string",
name="string")
const destinationPgvectorResource = new airbyte.DestinationPgvector("destinationPgvectorResource", {
configuration: {
embedding: {
azureOpenAi: {
apiBase: "string",
deployment: "string",
openaiKey: "string",
},
cohere: {
cohereKey: "string",
},
fake: {},
openAi: {
openaiKey: "string",
},
openAiCompatible: {
baseUrl: "string",
dimensions: 0,
apiKey: "string",
modelName: "string",
},
},
indexing: {
credentials: {
password: "string",
},
database: "string",
host: "string",
username: "string",
defaultSchema: "string",
port: 0,
},
processing: {
chunkSize: 0,
chunkOverlap: 0,
fieldNameMappings: [{
fromField: "string",
toField: "string",
}],
metadataFields: ["string"],
textFields: ["string"],
textSplitter: {
byMarkdownHeader: {
splitLevel: 0,
},
byProgrammingLanguage: {
language: "string",
},
bySeparator: {
keepSeparator: false,
separators: ["string"],
},
},
},
omitRawText: false,
},
workspaceId: "string",
definitionId: "string",
name: "string",
});
type: airbyte:DestinationPgvector
properties:
configuration:
embedding:
azureOpenAi:
apiBase: string
deployment: string
openaiKey: string
cohere:
cohereKey: string
fake: {}
openAi:
openaiKey: string
openAiCompatible:
apiKey: string
baseUrl: string
dimensions: 0
modelName: string
indexing:
credentials:
password: string
database: string
defaultSchema: string
host: string
port: 0
username: string
omitRawText: false
processing:
chunkOverlap: 0
chunkSize: 0
fieldNameMappings:
- fromField: string
toField: string
metadataFields:
- string
textFields:
- string
textSplitter:
byMarkdownHeader:
splitLevel: 0
byProgrammingLanguage:
language: string
bySeparator:
keepSeparator: false
separators:
- string
definitionId: string
name: string
workspaceId: string
DestinationPgvector Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DestinationPgvector resource accepts the following input properties:
- Configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- Workspace
Id string - Definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Name string
- Name of the destination e.g. dev-mysql-instance.
- Configuration
Destination
Pgvector Configuration Args - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- Workspace
Id string - Definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Name string
- Name of the destination e.g. dev-mysql-instance.
- configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- workspace
Id String - definition
Id String - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name String
- Name of the destination e.g. dev-mysql-instance.
- configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- workspace
Id string - definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name string
- Name of the destination e.g. dev-mysql-instance.
- configuration
Destination
Pgvector Configuration Args - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- workspace_
id str - definition_
id str - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name str
- Name of the destination e.g. dev-mysql-instance.
- configuration Property Map
- The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- workspace
Id String - definition
Id String - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name String
- Name of the destination e.g. dev-mysql-instance.
Outputs
All input properties are implicitly available as output properties. Additionally, the DestinationPgvector resource produces the following output properties:
- Created
At double - Destination
Id string - Destination
Type string - Id string
- The provider-assigned unique ID for this managed resource.
- Created
At float64 - Destination
Id string - Destination
Type string - Id string
- The provider-assigned unique ID for this managed resource.
- created
At Double - destination
Id String - destination
Type String - id String
- The provider-assigned unique ID for this managed resource.
- created
At number - destination
Id string - destination
Type string - id string
- The provider-assigned unique ID for this managed resource.
- created_
at float - destination_
id str - destination_
type str - id str
- The provider-assigned unique ID for this managed resource.
- created
At Number - destination
Id String - destination
Type String - id String
- The provider-assigned unique ID for this managed resource.
Look up Existing DestinationPgvector Resource
Get an existing DestinationPgvector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DestinationPgvectorState, opts?: CustomResourceOptions): DestinationPgvector
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
configuration: Optional[DestinationPgvectorConfigurationArgs] = None,
created_at: Optional[float] = None,
definition_id: Optional[str] = None,
destination_id: Optional[str] = None,
destination_type: Optional[str] = None,
name: Optional[str] = None,
workspace_id: Optional[str] = None) -> DestinationPgvector
func GetDestinationPgvector(ctx *Context, name string, id IDInput, state *DestinationPgvectorState, opts ...ResourceOption) (*DestinationPgvector, error)
public static DestinationPgvector Get(string name, Input<string> id, DestinationPgvectorState? state, CustomResourceOptions? opts = null)
public static DestinationPgvector get(String name, Output<String> id, DestinationPgvectorState state, CustomResourceOptions options)
resources: _: type: airbyte:DestinationPgvector get: id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- Created
At double - Definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Destination
Id string - Destination
Type string - Name string
- Name of the destination e.g. dev-mysql-instance.
- Workspace
Id string
- Configuration
Destination
Pgvector Configuration Args - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- Created
At float64 - Definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Destination
Id string - Destination
Type string - Name string
- Name of the destination e.g. dev-mysql-instance.
- Workspace
Id string
- configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- created
At Double - definition
Id String - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destination
Id String - destination
Type String - name String
- Name of the destination e.g. dev-mysql-instance.
- workspace
Id String
- configuration
Destination
Pgvector Configuration - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- created
At number - definition
Id string - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destination
Id string - destination
Type string - name string
- Name of the destination e.g. dev-mysql-instance.
- workspace
Id string
- configuration
Destination
Pgvector Configuration Args - The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- created_
at float - definition_
id str - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destination_
id str - destination_
type str - name str
- Name of the destination e.g. dev-mysql-instance.
- workspace_
id str
- configuration Property Map
- The configuration model for the Vector DB based destinations. This model is used to generate the UI for the destination configuration, as well as to provide type safety for the configuration passed to the destination.
- created
At Number - definition
Id String - The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destination
Id String - destination
Type String - name String
- Name of the destination e.g. dev-mysql-instance.
- workspace
Id String
Supporting Types
DestinationPgvectorConfiguration, DestinationPgvectorConfigurationArgs
- Embedding DestinationPgvectorConfigurationEmbedding - Embedding configuration
- Indexing DestinationPgvectorConfigurationIndexing - Postgres can be used to store vector data and retrieve embeddings.
- Processing DestinationPgvectorConfigurationProcessing
- OmitRawText bool - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
- Embedding DestinationPgvectorConfigurationEmbedding - Embedding configuration
- Indexing DestinationPgvectorConfigurationIndexing - Postgres can be used to store vector data and retrieve embeddings.
- Processing DestinationPgvectorConfigurationProcessing
- OmitRawText bool - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
- embedding DestinationPgvectorConfigurationEmbedding - Embedding configuration
- indexing DestinationPgvectorConfigurationIndexing - Postgres can be used to store vector data and retrieve embeddings.
- processing DestinationPgvectorConfigurationProcessing
- omitRawText Boolean - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
- embedding DestinationPgvectorConfigurationEmbedding - Embedding configuration
- indexing DestinationPgvectorConfigurationIndexing - Postgres can be used to store vector data and retrieve embeddings.
- processing DestinationPgvectorConfigurationProcessing
- omitRawText boolean - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
- embedding DestinationPgvectorConfigurationEmbedding - Embedding configuration
- indexing DestinationPgvectorConfigurationIndexing - Postgres can be used to store vector data and retrieve embeddings.
- processing DestinationPgvectorConfigurationProcessing
- omit_raw_text bool - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
- embedding Property Map - Embedding configuration
- indexing Property Map - Postgres can be used to store vector data and retrieve embeddings.
- processing Property Map
- omitRawText Boolean - Do not store the text that gets embedded along with the vector and the metadata in the destination. If set to true, only the vector and the metadata will be stored - in this case raw text for LLM use cases needs to be retrieved from another source. Default: false
DestinationPgvectorConfigurationEmbedding, DestinationPgvectorConfigurationEmbeddingArgs
- AzureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- Cohere DestinationPgvectorConfigurationEmbeddingCohere - Use the Cohere API to embed text.
- Fake DestinationPgvectorConfigurationEmbeddingFake - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- OpenAi DestinationPgvectorConfigurationEmbeddingOpenAi - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- OpenAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible - Use a service that's compatible with the OpenAI API to embed text.
- AzureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- Cohere DestinationPgvectorConfigurationEmbeddingCohere - Use the Cohere API to embed text.
- Fake DestinationPgvectorConfigurationEmbeddingFake - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- OpenAi DestinationPgvectorConfigurationEmbeddingOpenAi - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- OpenAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible - Use a service that's compatible with the OpenAI API to embed text.
- azureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- cohere DestinationPgvectorConfigurationEmbeddingCohere - Use the Cohere API to embed text.
- fake DestinationPgvectorConfigurationEmbeddingFake - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- openAi DestinationPgvectorConfigurationEmbeddingOpenAi - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- openAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible - Use a service that's compatible with the OpenAI API to embed text.
- azureOpenAi DestinationPgvectorConfigurationEmbeddingAzureOpenAi - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- cohere DestinationPgvectorConfigurationEmbeddingCohere - Use the Cohere API to embed text.
- fake DestinationPgvectorConfigurationEmbeddingFake - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- openAi DestinationPgvectorConfigurationEmbeddingOpenAi - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- openAiCompatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible - Use a service that's compatible with the OpenAI API to embed text.
- azure_open_ai DestinationPgvectorConfigurationEmbeddingAzureOpenAi - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- cohere DestinationPgvectorConfigurationEmbeddingCohere - Use the Cohere API to embed text.
- fake DestinationPgvectorConfigurationEmbeddingFake - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- open_ai DestinationPgvectorConfigurationEmbeddingOpenAi - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- open_ai_compatible DestinationPgvectorConfigurationEmbeddingOpenAiCompatible - Use a service that's compatible with the OpenAI API to embed text.
- azureOpenAi Property Map - Use the Azure-hosted OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- cohere Property Map - Use the Cohere API to embed text.
- fake Property Map - Use a fake embedding made out of random vectors with 1536 embedding dimensions. This is useful for testing the data pipeline without incurring any costs.
- openAi Property Map - Use the OpenAI API to embed text. This option is using the text-embedding-ada-002 model with 1536 embedding dimensions.
- openAiCompatible Property Map - Use a service that's compatible with the OpenAI API to embed text.
DestinationPgvectorConfigurationEmbeddingAzureOpenAi, DestinationPgvectorConfigurationEmbeddingAzureOpenAiArgs
- Api
Base string - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- Deployment string
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- Openai
Key string - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- Api
Base string - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- Deployment string
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- Openai
Key string - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- api
Base String - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- deployment String
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- openai
Key String - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- api
Base string - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- deployment string
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- openai
Key string - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- api_
base str - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- deployment str
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- openai_
key str - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- api
Base String - The base URL for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- deployment String
- The deployment for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
- openai
Key String - The API key for your Azure OpenAI resource. You can find this in the Azure portal under your Azure OpenAI resource
DestinationPgvectorConfigurationEmbeddingCohere, DestinationPgvectorConfigurationEmbeddingCohereArgs
- Cohere
Key string
- Cohere
Key string
- cohere
Key String
- cohere
Key string
- cohere_
key str
- cohere
Key String
DestinationPgvectorConfigurationEmbeddingOpenAi, DestinationPgvectorConfigurationEmbeddingOpenAiArgs
- Openai
Key string
- Openai
Key string
- openai
Key String
- openai
Key string
- openai_
key str
- openai
Key String
DestinationPgvectorConfigurationEmbeddingOpenAiCompatible, DestinationPgvectorConfigurationEmbeddingOpenAiCompatibleArgs
- Base
Url string - The base URL for your OpenAI-compatible service
- Dimensions double
- The number of dimensions the embedding model is generating
- Api
Key string - Default: ""
- Model
Name string - The name of the model to use for embedding. Default: "text-embedding-ada-002"
- Base
Url string - The base URL for your OpenAI-compatible service
- Dimensions float64
- The number of dimensions the embedding model is generating
- Api
Key string - Default: ""
- Model
Name string - The name of the model to use for embedding. Default: "text-embedding-ada-002"
- base
Url String - The base URL for your OpenAI-compatible service
- dimensions Double
- The number of dimensions the embedding model is generating
- api
Key String - Default: ""
- model
Name String - The name of the model to use for embedding. Default: "text-embedding-ada-002"
- base
Url string - The base URL for your OpenAI-compatible service
- dimensions number
- The number of dimensions the embedding model is generating
- api
Key string - Default: ""
- model
Name string - The name of the model to use for embedding. Default: "text-embedding-ada-002"
- base_
url str - The base URL for your OpenAI-compatible service
- dimensions float
- The number of dimensions the embedding model is generating
- api_
key str - Default: ""
- model_
name str - The name of the model to use for embedding. Default: "text-embedding-ada-002"
- base
Url String - The base URL for your OpenAI-compatible service
- dimensions Number
- The number of dimensions the embedding model is generating
- api
Key String - Default: ""
- model
Name String - The name of the model to use for embedding. Default: "text-embedding-ada-002"
DestinationPgvectorConfigurationIndexing, DestinationPgvectorConfigurationIndexingArgs
- Credentials DestinationPgvectorConfigurationIndexingCredentials
- Database string - Enter the name of the database that you want to sync data into
- Host string - Enter the account name you want to use to access the database.
- Username string - Enter the name of the user you want to use to access the database
- DefaultSchema string - Enter the name of the default schema. Default: "public"
- Port double - Enter the port you want to use to access the database. Default: 5432
- Credentials DestinationPgvectorConfigurationIndexingCredentials
- Database string - Enter the name of the database that you want to sync data into
- Host string - Enter the account name you want to use to access the database.
- Username string - Enter the name of the user you want to use to access the database
- DefaultSchema string - Enter the name of the default schema. Default: "public"
- Port float64 - Enter the port you want to use to access the database. Default: 5432
- credentials DestinationPgvectorConfigurationIndexingCredentials
- database String - Enter the name of the database that you want to sync data into
- host String - Enter the account name you want to use to access the database.
- username String - Enter the name of the user you want to use to access the database
- defaultSchema String - Enter the name of the default schema. Default: "public"
- port Double - Enter the port you want to use to access the database. Default: 5432
- credentials DestinationPgvectorConfigurationIndexingCredentials
- database string - Enter the name of the database that you want to sync data into
- host string - Enter the account name you want to use to access the database.
- username string - Enter the name of the user you want to use to access the database
- defaultSchema string - Enter the name of the default schema. Default: "public"
- port number - Enter the port you want to use to access the database. Default: 5432
- credentials DestinationPgvectorConfigurationIndexingCredentials
- database str - Enter the name of the database that you want to sync data into
- host str - Enter the account name you want to use to access the database.
- username str - Enter the name of the user you want to use to access the database
- default_schema str - Enter the name of the default schema. Default: "public"
- port float - Enter the port you want to use to access the database. Default: 5432
- credentials Property Map
- database String - Enter the name of the database that you want to sync data into
- host String - Enter the account name you want to use to access the database.
- username String - Enter the name of the user you want to use to access the database
- defaultSchema String - Enter the name of the default schema. Default: "public"
- port Number - Enter the port you want to use to access the database. Default: 5432
DestinationPgvectorConfigurationIndexingCredentials, DestinationPgvectorConfigurationIndexingCredentialsArgs
- Password string
- Enter the password you want to use to access the database
- Password string
- Enter the password you want to use to access the database
- password String
- Enter the password you want to use to access the database
- password string
- Enter the password you want to use to access the database
- password str
- Enter the password you want to use to access the database
- password String
- Enter the password you want to use to access the database
DestinationPgvectorConfigurationProcessing, DestinationPgvectorConfigurationProcessingArgs
- ChunkSize double - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- ChunkOverlap double - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- FieldNameMappings List<DestinationPgvectorConfigurationProcessingFieldNameMapping> - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- MetadataFields List<string> - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- TextFields List<string> - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- TextSplitter DestinationPgvectorConfigurationProcessingTextSplitter - Split text fields into chunks based on the specified method.
- ChunkSize float64 - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- ChunkOverlap float64 - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- FieldNameMappings []DestinationPgvectorConfigurationProcessingFieldNameMapping - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- MetadataFields []string - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- TextFields []string - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- TextSplitter DestinationPgvectorConfigurationProcessingTextSplitter - Split text fields into chunks based on the specified method.
- chunkSize Double - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- chunkOverlap Double - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- fieldNameMappings List<DestinationPgvectorConfigurationProcessingFieldNameMapping> - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- metadataFields List<String> - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- textFields List<String> - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- textSplitter DestinationPgvectorConfigurationProcessingTextSplitter - Split text fields into chunks based on the specified method.
- chunkSize number - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- chunkOverlap number - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- fieldNameMappings DestinationPgvectorConfigurationProcessingFieldNameMapping[] - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- metadataFields string[] - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- textFields string[] - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- textSplitter DestinationPgvectorConfigurationProcessingTextSplitter - Split text fields into chunks based on the specified method.
- chunk_size float - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- chunk_overlap float - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- field_name_mappings Sequence[DestinationPgvectorConfigurationProcessingFieldNameMapping] - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- metadata_fields Sequence[str] - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- text_fields Sequence[str] - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- text_splitter DestinationPgvectorConfigurationProcessingTextSplitter - Split text fields into chunks based on the specified method.
- chunkSize Number - Size of chunks in tokens to store in vector store (make sure it is not too big for the context of your LLM)
- chunkOverlap Number - Size of overlap between chunks in tokens to store in vector store to better capture relevant context. Default: 0
- fieldNameMappings List<Property Map> - List of fields to rename. Not applicable for nested fields, but can be used to rename fields already flattened via dot notation.
- metadataFields List<String> - List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.
- textFields List<String> - List of fields in the record that should be used to calculate the embedding. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. user.name will access the name field in the user object. It's also possible to use wildcards to access all fields in an object, e.g. users.*.name will access all names fields in all entries of the users array.
- textSplitter Property Map - Split text fields into chunks based on the specified method.
DestinationPgvectorConfigurationProcessingFieldNameMapping, DestinationPgvectorConfigurationProcessingFieldNameMappingArgs
- from_
field str - The field name in the source
- to_
field str - The field name to use in the destination
DestinationPgvectorConfigurationProcessingTextSplitter, DestinationPgvectorConfigurationProcessingTextSplitterArgs
- ByMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- ByProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- BySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
- ByMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- ByProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- BySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
- byMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- byProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- bySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
- byMarkdownHeader DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- byProgrammingLanguage DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- bySeparator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
- by_markdown_header DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- by_programming_language DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- by_separator DestinationPgvectorConfigurationProcessingTextSplitterBySeparator - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
- by
Markdown Property MapHeader - Split the text by Markdown headers down to the specified header level. If the chunk size fits multiple sections, they will be combined into a single chunk.
- by
Programming Property MapLanguage - Split the text by suitable delimiters based on the programming language. This is useful for splitting code into chunks.
- by
Separator Property Map - Split the text by the list of separators until the chunk size is reached, using the earlier mentioned separators where possible. This is useful for splitting text fields by paragraphs, sentences, words, etc.
DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeader, DestinationPgvectorConfigurationProcessingTextSplitterByMarkdownHeaderArgs
- SplitLevel double - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
- SplitLevel float64 - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
- splitLevel Double - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
- splitLevel number - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
- split_level float - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
- splitLevel Number - Level of markdown headers to split text fields by. Headings down to the specified level will be used as split points. Default: 1
DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguage, DestinationPgvectorConfigurationProcessingTextSplitterByProgrammingLanguageArgs
- Language string
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
- Language string
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
- language String
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
- language string
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
- language str
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
- language String
- Split code in suitable places based on the programming language. must be one of ["cpp", "go", "java", "js", "php", "proto", "python", "rst", "ruby", "rust", "scala", "swift", "markdown", "latex", "html", "sol"]
DestinationPgvectorConfigurationProcessingTextSplitterBySeparator, DestinationPgvectorConfigurationProcessingTextSplitterBySeparatorArgs
- KeepSeparator bool - Whether to keep the separator in the resulting chunks. Default: false
- Separators List<string>
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
- KeepSeparator bool - Whether to keep the separator in the resulting chunks. Default: false
- Separators []string
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
- keepSeparator Boolean - Whether to keep the separator in the resulting chunks. Default: false
- separators List<String>
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
- keepSeparator boolean - Whether to keep the separator in the resulting chunks. Default: false
- separators string[]
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
- keep_separator bool - Whether to keep the separator in the resulting chunks. Default: false
- separators Sequence[str]
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
- keepSeparator Boolean - Whether to keep the separator in the resulting chunks. Default: false
- separators List<String>
- List of separator strings to split text fields by. The separator itself needs to be wrapped in double quotes, e.g. to split by the dot character, use ".". To split by a newline, use "\n".
Import
$ pulumi import airbyte:index/destinationPgvector:DestinationPgvector my_airbyte_destination_pgvector ""
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- airbyte airbytehq/terraform-provider-airbyte
- License
- Notes
- This Pulumi package is based on the
airbyte
Terraform Provider.