datarobot.LlmBlueprint
An LLM Blueprint configures an LLM within a DataRobot Playground: the LLM to use, its prompt type and settings, and an optional Vector Database.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as datarobot from "@datarobot/pulumi-datarobot";
const exampleUseCase = new datarobot.UseCase("exampleUseCase", {});
const examplePlayground = new datarobot.Playground("examplePlayground", {
    description: "Description for the example playground",
    useCaseId: exampleUseCase.id,
});
const exampleLlmBlueprint = new datarobot.LlmBlueprint("exampleLlmBlueprint", {
    description: "Description for the example LLM blueprint",
    playgroundId: examplePlayground.id,
    llmId: "azure-openai-gpt-3.5-turbo",
    promptType: "ONE_TIME_PROMPT",
    // Optional:
    // llmSettings: {
    //     maxCompletionLength: 1000,
    //     temperature: 0.5,
    //     topP: 0.9,
    //     systemPrompt: "My Prompt:",
    // },
    // vectorDatabaseSettings: {
    //     maxDocumentsRetrievedPerPrompt: 5,
    //     maxTokens: 1000,
    // },
});
export const exampleId = exampleLlmBlueprint.id;
import pulumi
import pulumi_datarobot as datarobot
example_use_case = datarobot.UseCase("exampleUseCase")
example_playground = datarobot.Playground("examplePlayground",
    description="Description for the example playground",
    use_case_id=example_use_case.id)
example_llm_blueprint = datarobot.LlmBlueprint("exampleLlmBlueprint",
    description="Description for the example LLM blueprint",
    playground_id=example_playground.id,
    llm_id="azure-openai-gpt-3.5-turbo",
    prompt_type="ONE_TIME_PROMPT",
    # Optional:
    # llm_settings={
    #     "max_completion_length": 1000,
    #     "temperature": 0.5,
    #     "top_p": 0.9,
    #     "system_prompt": "My Prompt:",
    # },
    # vector_database_settings={
    #     "max_documents_retrieved_per_prompt": 5,
    #     "max_tokens": 1000,
    # },
)
pulumi.export("exampleId", example_llm_blueprint.id)
package main
import (
"github.com/datarobot-community/pulumi-datarobot/sdk/go/datarobot"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		exampleUseCase, err := datarobot.NewUseCase(ctx, "exampleUseCase", nil)
		if err != nil {
			return err
		}
		examplePlayground, err := datarobot.NewPlayground(ctx, "examplePlayground", &datarobot.PlaygroundArgs{
			Description: pulumi.String("Description for the example playground"),
			UseCaseId:   exampleUseCase.ID(),
		})
		if err != nil {
			return err
		}
		exampleLlmBlueprint, err := datarobot.NewLlmBlueprint(ctx, "exampleLlmBlueprint", &datarobot.LlmBlueprintArgs{
			Description:  pulumi.String("Description for the example LLM blueprint"),
			PlaygroundId: examplePlayground.ID(),
			LlmId:        pulumi.String("azure-openai-gpt-3.5-turbo"),
			PromptType:   pulumi.String("ONE_TIME_PROMPT"),
		})
		if err != nil {
			return err
		}
		ctx.Export("exampleId", exampleLlmBlueprint.ID())
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Datarobot = DataRobotPulumi.Datarobot;
return await Deployment.RunAsync(() =>
{
    var exampleUseCase = new Datarobot.UseCase("exampleUseCase");
    var examplePlayground = new Datarobot.Playground("examplePlayground", new()
    {
        Description = "Description for the example playground",
        UseCaseId = exampleUseCase.Id,
    });
    var exampleLlmBlueprint = new Datarobot.LlmBlueprint("exampleLlmBlueprint", new()
    {
        Description = "Description for the example LLM blueprint",
        PlaygroundId = examplePlayground.Id,
        LlmId = "azure-openai-gpt-3.5-turbo",
        PromptType = "ONE_TIME_PROMPT",
        // Optional:
        // LlmSettings = new Datarobot.Inputs.LlmBlueprintLlmSettingsArgs
        // {
        //     MaxCompletionLength = 1000,
        //     Temperature = 0.5,
        //     TopP = 0.9,
        //     SystemPrompt = "My Prompt:",
        // },
        // VectorDatabaseSettings = new Datarobot.Inputs.LlmBlueprintVectorDatabaseSettingsArgs
        // {
        //     MaxDocumentsRetrievedPerPrompt = 5,
        //     MaxTokens = 1000,
        // },
    });
    return new Dictionary<string, object?>
    {
        ["exampleId"] = exampleLlmBlueprint.Id,
    };
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.datarobot.UseCase;
import com.pulumi.datarobot.Playground;
import com.pulumi.datarobot.PlaygroundArgs;
import com.pulumi.datarobot.LlmBlueprint;
import com.pulumi.datarobot.LlmBlueprintArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var exampleUseCase = new UseCase("exampleUseCase");
        var examplePlayground = new Playground("examplePlayground", PlaygroundArgs.builder()
            .description("Description for the example playground")
            .useCaseId(exampleUseCase.id())
            .build());
        var exampleLlmBlueprint = new LlmBlueprint("exampleLlmBlueprint", LlmBlueprintArgs.builder()
            .description("Description for the example LLM blueprint")
            .playgroundId(examplePlayground.id())
            .llmId("azure-openai-gpt-3.5-turbo")
            .promptType("ONE_TIME_PROMPT")
            // Optional:
            // .llmSettings(LlmBlueprintLlmSettingsArgs.builder()
            //     .maxCompletionLength(1000)
            //     .temperature(0.5)
            //     .topP(0.9)
            //     .systemPrompt("My Prompt:")
            //     .build())
            // .vectorDatabaseSettings(LlmBlueprintVectorDatabaseSettingsArgs.builder()
            //     .maxDocumentsRetrievedPerPrompt(5)
            //     .maxTokens(1000)
            //     .build())
            .build());
        ctx.export("exampleId", exampleLlmBlueprint.id());
    }
}
resources:
  exampleUseCase:
    type: datarobot:UseCase
  examplePlayground:
    type: datarobot:Playground
    properties:
      description: Description for the example playground
      useCaseId: ${exampleUseCase.id}
  exampleLlmBlueprint:
    type: datarobot:LlmBlueprint
    properties:
      description: Description for the example LLM blueprint
      playgroundId: ${examplePlayground.id}
      llmId: azure-openai-gpt-3.5-turbo
      promptType: ONE_TIME_PROMPT
outputs:
  exampleId: ${exampleLlmBlueprint.id}
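The optional llm_settings block shown (commented out) in the examples above maps directly onto the resource's inputs. A minimal Python sketch, reusing the same placeholder values from the comments:

import pulumi
import pulumi_datarobot as datarobot

example_use_case = datarobot.UseCase("exampleUseCase")
example_playground = datarobot.Playground(
    "examplePlayground",
    description="Description for the example playground",
    use_case_id=example_use_case.id,
)

# Same blueprint as above, but with the optional LLM settings applied.
example_llm_blueprint = datarobot.LlmBlueprint(
    "exampleLlmBlueprint",
    description="Description for the example LLM blueprint",
    playground_id=example_playground.id,
    llm_id="azure-openai-gpt-3.5-turbo",
    prompt_type="ONE_TIME_PROMPT",
    llm_settings={
        "max_completion_length": 1000,
        "temperature": 0.5,
        "top_p": 0.9,
        "system_prompt": "My Prompt:",
    },
)

pulumi.export("exampleId", example_llm_blueprint.id)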
Create LlmBlueprint Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new LlmBlueprint(name: string, args: LlmBlueprintArgs, opts?: CustomResourceOptions);
@overload
def LlmBlueprint(resource_name: str,
args: LlmBlueprintArgs,
opts: Optional[ResourceOptions] = None)
@overload
def LlmBlueprint(resource_name: str,
opts: Optional[ResourceOptions] = None,
llm_id: Optional[str] = None,
playground_id: Optional[str] = None,
description: Optional[str] = None,
llm_settings: Optional[LlmBlueprintLlmSettingsArgs] = None,
name: Optional[str] = None,
prompt_type: Optional[str] = None,
vector_database_id: Optional[str] = None,
vector_database_settings: Optional[LlmBlueprintVectorDatabaseSettingsArgs] = None)
func NewLlmBlueprint(ctx *Context, name string, args LlmBlueprintArgs, opts ...ResourceOption) (*LlmBlueprint, error)
public LlmBlueprint(string name, LlmBlueprintArgs args, CustomResourceOptions? opts = null)
public LlmBlueprint(String name, LlmBlueprintArgs args)
public LlmBlueprint(String name, LlmBlueprintArgs args, CustomResourceOptions options)
type: datarobot:LlmBlueprint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args LlmBlueprintArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args LlmBlueprintArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args LlmBlueprintArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args LlmBlueprintArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args LlmBlueprintArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var llmBlueprintResource = new Datarobot.LlmBlueprint("llmBlueprintResource", new()
{
LlmId = "string",
PlaygroundId = "string",
Description = "string",
LlmSettings = new Datarobot.Inputs.LlmBlueprintLlmSettingsArgs
{
MaxCompletionLength = 0,
SystemPrompt = "string",
Temperature = 0,
TopP = 0,
},
Name = "string",
PromptType = "string",
VectorDatabaseId = "string",
VectorDatabaseSettings = new Datarobot.Inputs.LlmBlueprintVectorDatabaseSettingsArgs
{
MaxDocumentsRetrievedPerPrompt = 0,
MaxTokens = 0,
},
});
example, err := datarobot.NewLlmBlueprint(ctx, "llmBlueprintResource", &datarobot.LlmBlueprintArgs{
LlmId: pulumi.String("string"),
PlaygroundId: pulumi.String("string"),
Description: pulumi.String("string"),
LlmSettings: &datarobot.LlmBlueprintLlmSettingsArgs{
MaxCompletionLength: pulumi.Int(0),
SystemPrompt: pulumi.String("string"),
Temperature: pulumi.Float64(0),
TopP: pulumi.Float64(0),
},
Name: pulumi.String("string"),
PromptType: pulumi.String("string"),
VectorDatabaseId: pulumi.String("string"),
VectorDatabaseSettings: &datarobot.LlmBlueprintVectorDatabaseSettingsArgs{
MaxDocumentsRetrievedPerPrompt: pulumi.Int(0),
MaxTokens: pulumi.Int(0),
},
})
var llmBlueprintResource = new LlmBlueprint("llmBlueprintResource", LlmBlueprintArgs.builder()
.llmId("string")
.playgroundId("string")
.description("string")
.llmSettings(LlmBlueprintLlmSettingsArgs.builder()
.maxCompletionLength(0)
.systemPrompt("string")
.temperature(0)
.topP(0)
.build())
.name("string")
.promptType("string")
.vectorDatabaseId("string")
.vectorDatabaseSettings(LlmBlueprintVectorDatabaseSettingsArgs.builder()
.maxDocumentsRetrievedPerPrompt(0)
.maxTokens(0)
.build())
.build());
llm_blueprint_resource = datarobot.LlmBlueprint("llmBlueprintResource",
llm_id="string",
playground_id="string",
description="string",
llm_settings={
"max_completion_length": 0,
"system_prompt": "string",
"temperature": 0,
"top_p": 0,
},
name="string",
prompt_type="string",
vector_database_id="string",
vector_database_settings={
"max_documents_retrieved_per_prompt": 0,
"max_tokens": 0,
})
const llmBlueprintResource = new datarobot.LlmBlueprint("llmBlueprintResource", {
llmId: "string",
playgroundId: "string",
description: "string",
llmSettings: {
maxCompletionLength: 0,
systemPrompt: "string",
temperature: 0,
topP: 0,
},
name: "string",
promptType: "string",
vectorDatabaseId: "string",
vectorDatabaseSettings: {
maxDocumentsRetrievedPerPrompt: 0,
maxTokens: 0,
},
});
type: datarobot:LlmBlueprint
properties:
  description: string
  llmId: string
  llmSettings:
    maxCompletionLength: 0
    systemPrompt: string
    temperature: 0
    topP: 0
  name: string
  playgroundId: string
  promptType: string
  vectorDatabaseId: string
  vectorDatabaseSettings:
    maxDocumentsRetrievedPerPrompt: 0
    maxTokens: 0
LlmBlueprint Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
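For example, the llm_settings input can be supplied either way; the two resources in this sketch are equivalent (the playground ID is a placeholder):

import pulumi_datarobot as datarobot

# Dictionary literal form.
blueprint_from_dict = datarobot.LlmBlueprint(
    "blueprintFromDict",
    playground_id="<playground-id>",
    llm_id="azure-openai-gpt-3.5-turbo",
    llm_settings={
        "max_completion_length": 1000,
        "temperature": 0.5,
    },
)

# Argument-class form, equivalent to the dictionary above.
blueprint_from_args = datarobot.LlmBlueprint(
    "blueprintFromArgs",
    playground_id="<playground-id>",
    llm_id="azure-openai-gpt-3.5-turbo",
    llm_settings=datarobot.LlmBlueprintLlmSettingsArgs(
        max_completion_length=1000,
        temperature=0.5,
    ),
)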
The LlmBlueprint resource accepts the following input properties:
(Each SDK exposes the same properties; only the naming convention differs: camelCase in TypeScript and Java, snake_case in Python, PascalCase in C# and Go, and camelCase keys in YAML. The list below uses the TypeScript names.)
- llmId (string) - The id of the LLM for the LLM Blueprint.
- playgroundId (string) - The id of the Playground for the LLM Blueprint.
- description (string) - The description of the LLM Blueprint.
- llmSettings (LlmBlueprintLlmSettings) - The LLM settings for the LLM Blueprint.
- name (string) - The name of the LLM Blueprint.
- promptType (string) - The prompt type for the LLM Blueprint.
- vectorDatabaseId (string) - The id of the Vector Database for the LLM Blueprint.
- vectorDatabaseSettings (LlmBlueprintVectorDatabaseSettings) - The Vector Database settings for the LLM Blueprint.
Outputs
All input properties are implicitly available as output properties. Additionally, the LlmBlueprint resource produces the following output properties:
- id (string) - The provider-assigned unique ID for this managed resource.
Look up Existing LlmBlueprint Resource
Get an existing LlmBlueprint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: LlmBlueprintState, opts?: CustomResourceOptions): LlmBlueprint
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
description: Optional[str] = None,
llm_id: Optional[str] = None,
llm_settings: Optional[LlmBlueprintLlmSettingsArgs] = None,
name: Optional[str] = None,
playground_id: Optional[str] = None,
prompt_type: Optional[str] = None,
vector_database_id: Optional[str] = None,
vector_database_settings: Optional[LlmBlueprintVectorDatabaseSettingsArgs] = None) -> LlmBlueprint
func GetLlmBlueprint(ctx *Context, name string, id IDInput, state *LlmBlueprintState, opts ...ResourceOption) (*LlmBlueprint, error)
public static LlmBlueprint Get(string name, Input<string> id, LlmBlueprintState? state, CustomResourceOptions? opts = null)
public static LlmBlueprint get(String name, Output<String> id, LlmBlueprintState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name / resource_name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to look up.
- state - Any extra arguments used during the lookup (in Python, these are passed as individual keyword arguments, as shown in the signature above).
- opts / options - A bag of options that control this resource's behavior.
The following state properties can be supplied (TypeScript names shown; see the naming note under Inputs):
- description (string) - The description of the LLM Blueprint.
- llmId (string) - The id of the LLM for the LLM Blueprint.
- llmSettings (LlmBlueprintLlmSettings) - The LLM settings for the LLM Blueprint.
- name (string) - The name of the LLM Blueprint.
- playgroundId (string) - The id of the Playground for the LLM Blueprint.
- promptType (string) - The prompt type for the LLM Blueprint.
- vectorDatabaseId (string) - The id of the Vector Database for the LLM Blueprint.
- vectorDatabaseSettings (LlmBlueprintVectorDatabaseSettings) - The Vector Database settings for the LLM Blueprint.
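As a concrete sketch, a Python lookup of an already-provisioned blueprint (the ID below is a placeholder, not a real resource ID) and a re-export of one of its properties looks like this:

import pulumi
import pulumi_datarobot as datarobot

# Placeholder ID of an LLM Blueprint that already exists in DataRobot.
existing_blueprint = datarobot.LlmBlueprint.get(
    "existingLlmBlueprint",
    id="5f1a0000000000000000aaaa",
)

pulumi.export("existingLlmId", existing_blueprint.llm_id)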
Supporting Types
LlmBlueprintLlmSettings, LlmBlueprintLlmSettingsArgs
- maxCompletionLength (integer) - The maximum number of tokens allowed in the completion. The combined count of this value and prompt tokens must be below the model's maximum context size, where prompt token count is comprised of system prompt, user prompt, recent chat history, and vector database citations.
- systemPrompt (string) - Guides the style of the LLM response. It is a 'universal' prompt, prepended to all individual prompts.
- temperature (float) - Controls the randomness of model output, where higher values return more diverse output and lower values return more deterministic results.
- topP (float) - Threshold that controls the selection of words included in the response, based on a cumulative probability cutoff for token selection. Higher numbers return more diverse options for outputs.
LlmBlueprintVectorDatabaseSettings, LlmBlueprintVectorDatabaseSettingsArgs
- maxDocumentsRetrievedPerPrompt (integer) - The maximum number of documents to retrieve from the Vector Database.
- maxTokens (integer) - The maximum number of tokens to retrieve from the Vector Database.
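None of the examples above attach a Vector Database. A hedged Python sketch of doing so, where the playground and vector database IDs are placeholders and the settings reuse the values from the commented-out block in Example Usage:

import pulumi_datarobot as datarobot

rag_blueprint = datarobot.LlmBlueprint(
    "ragLlmBlueprint",
    playground_id="<playground-id>",            # placeholder
    llm_id="azure-openai-gpt-3.5-turbo",
    vector_database_id="<vector-database-id>",  # placeholder
    vector_database_settings={
        "max_documents_retrieved_per_prompt": 5,
        "max_tokens": 1000,
    },
)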
Package Details
- Repository
- datarobot datarobot-community/pulumi-datarobot
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the datarobot Terraform Provider.