AWS v6.60.0 published on Tuesday, Nov 19, 2024 by Pulumi
aws.bedrock.getInferenceProfile
Data source for retrieving information about an AWS Bedrock Inference Profile.
Example Usage
Basic Usage
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const test = aws.bedrock.getInferenceProfiles({});
const testGetInferenceProfile = test.then(test => aws.bedrock.getInferenceProfile({
inferenceProfileId: test.inferenceProfileSummaries?.[0]?.inferenceProfileId,
}));
import pulumi
import pulumi_aws as aws
test = aws.bedrock.get_inference_profiles()
test_get_inference_profile = aws.bedrock.get_inference_profile(inference_profile_id=test.inference_profile_summaries[0].inference_profile_id)
package main

import (
    "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/bedrock"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        test, err := bedrock.GetInferenceProfiles(ctx, nil, nil)
        if err != nil {
            return err
        }
        _, err = bedrock.GetInferenceProfile(ctx, &bedrock.GetInferenceProfileArgs{
            InferenceProfileId: test.InferenceProfileSummaries[0].InferenceProfileId,
        }, nil)
        if err != nil {
            return err
        }
        return nil
    })
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
    var test = Aws.Bedrock.GetInferenceProfiles.Invoke();

    var testGetInferenceProfile = Aws.Bedrock.GetInferenceProfile.Invoke(new()
    {
        InferenceProfileId = test.Apply(getInferenceProfilesResult => getInferenceProfilesResult.InferenceProfileSummaries[0]?.InferenceProfileId),
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.bedrock.BedrockFunctions;
import com.pulumi.aws.bedrock.inputs.GetInferenceProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var test = BedrockFunctions.getInferenceProfiles();

        final var testGetInferenceProfile = BedrockFunctions.getInferenceProfile(GetInferenceProfileArgs.builder()
            .inferenceProfileId(test.applyValue(getInferenceProfilesResult -> getInferenceProfilesResult.inferenceProfileSummaries().get(0).inferenceProfileId()))
            .build());
    }
}
variables:
  test:
    fn::invoke:
      Function: aws:bedrock:getInferenceProfiles
      Arguments: {}
  testGetInferenceProfile:
    fn::invoke:
      Function: aws:bedrock:getInferenceProfile
      Arguments:
        inferenceProfileId: ${test.inferenceProfileSummaries[0].inferenceProfileId}
Using getInferenceProfile
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getInferenceProfile(args: GetInferenceProfileArgs, opts?: InvokeOptions): Promise<GetInferenceProfileResult>
function getInferenceProfileOutput(args: GetInferenceProfileOutputArgs, opts?: InvokeOptions): Output<GetInferenceProfileResult>
def get_inference_profile(inference_profile_id: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetInferenceProfileResult
def get_inference_profile_output(inference_profile_id: Optional[pulumi.Input[str]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetInferenceProfileResult]
func GetInferenceProfile(ctx *Context, args *GetInferenceProfileArgs, opts ...InvokeOption) (*GetInferenceProfileResult, error)
func GetInferenceProfileOutput(ctx *Context, args *GetInferenceProfileOutputArgs, opts ...InvokeOption) GetInferenceProfileResultOutput
> Note: This function is named GetInferenceProfile in the Go SDK.
public static class GetInferenceProfile
{
public static Task<GetInferenceProfileResult> InvokeAsync(GetInferenceProfileArgs args, InvokeOptions? opts = null)
public static Output<GetInferenceProfileResult> Invoke(GetInferenceProfileInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetInferenceProfileResult> getInferenceProfile(GetInferenceProfileArgs args, InvokeOptions options)
// Output-based functions aren't available in Java yet
fn::invoke:
  function: aws:bedrock/getInferenceProfile:getInferenceProfile
  arguments:
    # arguments dictionary
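As a minimal TypeScript sketch of the difference between the two forms (the inference profile ID in the direct form is a hypothetical placeholder):

import * as aws from "@pulumi/aws";

// Direct form: plain arguments, Promise-wrapped result.
const direct = aws.bedrock.getInferenceProfile({
    inferenceProfileId: "us.anthropic.claude-3-5-sonnet-20240620-v1:0", // hypothetical ID
});
export const directName = direct.then(profile => profile.inferenceProfileName);

// Output form: Input-wrapped arguments, Output-wrapped result.
// Useful when the ID is itself an Output, e.g. from another invoke.
const profiles = aws.bedrock.getInferenceProfilesOutput();
const viaOutput = aws.bedrock.getInferenceProfileOutput({
    inferenceProfileId: profiles.apply(r => r.inferenceProfileSummaries[0].inferenceProfileId),
});
export const outputName = viaOutput.inferenceProfileName;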
The following arguments are supported:
C#
- InferenceProfileId string - Inference Profile identifier.

Go
- InferenceProfileId string - Inference Profile identifier.

Java
- inferenceProfileId String - Inference Profile identifier.

TypeScript
- inferenceProfileId string - Inference Profile identifier.

Python
- inference_profile_id str - Inference Profile identifier.

YAML
- inferenceProfileId String - Inference Profile identifier.
getInferenceProfile Result
The following output properties are available:
C#
- CreatedAt string - The time at which the inference profile was created.
- Description string - The description of the inference profile.
- Id string - The provider-assigned unique ID for this managed resource.
- InferenceProfileArn string - The Amazon Resource Name (ARN) of the inference profile.
- InferenceProfileId string
- InferenceProfileName string - The unique identifier of the inference profile.
- Models List<GetInferenceProfileModel> - A list of information about each model in the inference profile. See models below.
- Status string - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- Type string - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- UpdatedAt string - The time at which the inference profile was last updated.

Go
- CreatedAt string - The time at which the inference profile was created.
- Description string - The description of the inference profile.
- Id string - The provider-assigned unique ID for this managed resource.
- InferenceProfileArn string - The Amazon Resource Name (ARN) of the inference profile.
- InferenceProfileId string
- InferenceProfileName string - The unique identifier of the inference profile.
- Models []GetInferenceProfileModel - A list of information about each model in the inference profile. See models below.
- Status string - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- Type string - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- UpdatedAt string - The time at which the inference profile was last updated.

Java
- createdAt String - The time at which the inference profile was created.
- description String - The description of the inference profile.
- id String - The provider-assigned unique ID for this managed resource.
- inferenceProfileArn String - The Amazon Resource Name (ARN) of the inference profile.
- inferenceProfileId String
- inferenceProfileName String - The unique identifier of the inference profile.
- models List<GetInferenceProfileModel> - A list of information about each model in the inference profile. See models below.
- status String - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- type String - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- updatedAt String - The time at which the inference profile was last updated.

TypeScript
- createdAt string - The time at which the inference profile was created.
- description string - The description of the inference profile.
- id string - The provider-assigned unique ID for this managed resource.
- inferenceProfileArn string - The Amazon Resource Name (ARN) of the inference profile.
- inferenceProfileId string
- inferenceProfileName string - The unique identifier of the inference profile.
- models GetInferenceProfileModel[] - A list of information about each model in the inference profile. See models below.
- status string - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- type string - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- updatedAt string - The time at which the inference profile was last updated.

Python
- created_at str - The time at which the inference profile was created.
- description str - The description of the inference profile.
- id str - The provider-assigned unique ID for this managed resource.
- inference_profile_arn str - The Amazon Resource Name (ARN) of the inference profile.
- inference_profile_id str
- inference_profile_name str - The unique identifier of the inference profile.
- models Sequence[GetInferenceProfileModel] - A list of information about each model in the inference profile. See models below.
- status str - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- type str - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- updated_at str - The time at which the inference profile was last updated.

YAML
- createdAt String - The time at which the inference profile was created.
- description String - The description of the inference profile.
- id String - The provider-assigned unique ID for this managed resource.
- inferenceProfileArn String - The Amazon Resource Name (ARN) of the inference profile.
- inferenceProfileId String
- inferenceProfileName String - The unique identifier of the inference profile.
- models List<Property Map> - A list of information about each model in the inference profile. See models below.
- status String - The status of the inference profile. ACTIVE means that the inference profile is available to use.
- type String - The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.
- updatedAt String - The time at which the inference profile was last updated.
Supporting Types
GetInferenceProfileModel
C#
- ModelArn string - The Amazon Resource Name (ARN) of the model.

Go
- ModelArn string - The Amazon Resource Name (ARN) of the model.

Java
- modelArn String - The Amazon Resource Name (ARN) of the model.

TypeScript
- modelArn string - The Amazon Resource Name (ARN) of the model.

Python
- model_arn str - The Amazon Resource Name (ARN) of the model.

YAML
- modelArn String - The Amazon Resource Name (ARN) of the model.
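As a short illustrative sketch (TypeScript, with a hypothetical profile ID), the nested model information can be projected out of the models property alongside the other outputs documented above:

import * as aws from "@pulumi/aws";

// Look up a single inference profile and surface a few of its documented
// outputs, including the ARN of each underlying model.
const profile = aws.bedrock.getInferenceProfileOutput({
    inferenceProfileId: "us.amazon.nova-pro-v1:0", // hypothetical ID
});

export const profileArn = profile.inferenceProfileArn;
export const profileStatus = profile.status; // e.g. "ACTIVE"
export const modelArns = profile.models.apply(models => models.map(m => m.modelArn));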
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.