konnect.GatewayPluginAiRequestTransformer
Explore with Pulumi AI
GatewayPluginAiRequestTransformer Resource
Example Usage
Coming soon!
Coming soon!
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.konnect.GatewayPluginAiRequestTransformer;
import com.pulumi.konnect.GatewayPluginAiRequestTransformerArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmAuthArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmLoggingArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmModelArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerConsumerGroupArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerOrderingArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerOrderingAfterArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerOrderingBeforeArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerRouteArgs;
import com.pulumi.konnect.inputs.GatewayPluginAiRequestTransformerServiceArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Declares a konnect.GatewayPluginAiRequestTransformer with placeholder values
    // for every configurable input. Setter methods on the generated Java builders
    // are camelCase (e.g. httpProxyHost); the snake_case names previously shown
    // here (http_proxy_host, max_request_body_size, ...) do not exist on
    // GatewayPluginAiRequestTransformerConfigArgs.Builder. Likewise the ordering
    // after/before builders take accesses(...), not access(...).
    public static void stack(Context ctx) {
        var myGatewaypluginairequesttransformer = new GatewayPluginAiRequestTransformer("myGatewaypluginairequesttransformer", GatewayPluginAiRequestTransformerArgs.builder()
            .config(GatewayPluginAiRequestTransformerConfigArgs.builder()
                .httpProxyHost("...my_http_proxy_host...")
                .httpProxyPort(19860)
                .httpTimeout(10)
                .httpsProxyHost("...my_https_proxy_host...")
                .httpsProxyPort(20590)
                .httpsVerify(false)
                .llm(GatewayPluginAiRequestTransformerConfigLlmArgs.builder()
                    .auth(GatewayPluginAiRequestTransformerConfigLlmAuthArgs.builder()
                        .allowOverride(false)
                        .awsAccessKeyId("...my_aws_access_key_id...")
                        .awsSecretAccessKey("...my_aws_secret_access_key...")
                        .azureClientId("...my_azure_client_id...")
                        .azureClientSecret("...my_azure_client_secret...")
                        .azureTenantId("...my_azure_tenant_id...")
                        .azureUseManagedIdentity(true)
                        .gcpServiceAccountJson("...my_gcp_service_account_json...")
                        .gcpUseServiceAccount(true)
                        .headerName("...my_header_name...")
                        .headerValue("...my_header_value...")
                        .paramLocation("query")
                        .paramName("...my_param_name...")
                        .paramValue("...my_param_value...")
                        .build())
                    .logging(GatewayPluginAiRequestTransformerConfigLlmLoggingArgs.builder()
                        .logPayloads(false)
                        .logStatistics(false)
                        .build())
                    .model(GatewayPluginAiRequestTransformerConfigLlmModelArgs.builder()
                        .name("...my_name...")
                        .options(GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs.builder()
                            .anthropicVersion("...my_anthropic_version...")
                            .azureApiVersion("...my_azure_api_version...")
                            .azureDeploymentId("...my_azure_deployment_id...")
                            .azureInstance("...my_azure_instance...")
                            .bedrock(GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs.builder()
                                .awsRegion("...my_aws_region...")
                                .build())
                            .gemini(GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs.builder()
                                .apiEndpoint("...my_api_endpoint...")
                                .locationId("...my_location_id...")
                                .projectId("...my_project_id...")
                                .build())
                            .huggingface(GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs.builder()
                                .useCache(false)
                                .waitForModel(true)
                                .build())
                            .inputCost(6.37)
                            .llama2Format("ollama")
                            .maxTokens(5)
                            .mistralFormat("ollama")
                            .outputCost(8.25)
                            .temperature(0.7)
                            .topK(420)
                            .topP(0.54)
                            .upstreamPath("...my_upstream_path...")
                            .upstreamUrl("...my_upstream_url...")
                            .build())
                        .provider("mistral")
                        .build())
                    .routeType("preserve")
                    .build())
                .maxRequestBodySize(7)
                .prompt("...my_prompt...")
                .transformationExtractPattern("...my_transformation_extract_pattern...")
                .build())
            .consumerGroup(GatewayPluginAiRequestTransformerConsumerGroupArgs.builder()
                .id("...my_id...")
                .build())
            .controlPlaneId("9524ec7d-36d9-465d-a8c5-83a3c9390458")
            .enabled(true)
            .gatewayPluginAiRequestTransformerId("...my_id...")
            .instanceName("...my_instance_name...")
            .ordering(GatewayPluginAiRequestTransformerOrderingArgs.builder()
                .after(GatewayPluginAiRequestTransformerOrderingAfterArgs.builder()
                    .accesses("...")
                    .build())
                .before(GatewayPluginAiRequestTransformerOrderingBeforeArgs.builder()
                    .accesses("...")
                    .build())
                .build())
            .protocols("http")
            .route(GatewayPluginAiRequestTransformerRouteArgs.builder()
                .id("...my_id...")
                .build())
            .service(GatewayPluginAiRequestTransformerServiceArgs.builder()
                .id("...my_id...")
                .build())
            .tags("...")
            .build());
    }
}
resources:
  myGatewaypluginairequesttransformer:
    type: konnect:GatewayPluginAiRequestTransformer
    properties:
      # Pulumi YAML uses the schema's camelCase property names throughout;
      # the snake_case config keys previously shown here (http_proxy_host,
      # route_type, max_request_body_size, ...) were a generator artifact
      # and disagreed with the camelCase nested keys below.
      config:
        httpProxyHost: '...my_http_proxy_host...'
        httpProxyPort: 19860
        httpTimeout: 10
        httpsProxyHost: '...my_https_proxy_host...'
        httpsProxyPort: 20590
        httpsVerify: false
        llm:
          auth:
            allowOverride: false
            awsAccessKeyId: '...my_aws_access_key_id...'
            awsSecretAccessKey: '...my_aws_secret_access_key...'
            azureClientId: '...my_azure_client_id...'
            azureClientSecret: '...my_azure_client_secret...'
            azureTenantId: '...my_azure_tenant_id...'
            azureUseManagedIdentity: true
            gcpServiceAccountJson: '...my_gcp_service_account_json...'
            gcpUseServiceAccount: true
            headerName: '...my_header_name...'
            headerValue: '...my_header_value...'
            paramLocation: query
            paramName: '...my_param_name...'
            paramValue: '...my_param_value...'
          logging:
            logPayloads: false
            logStatistics: false
          model:
            name: '...my_name...'
            options:
              anthropicVersion: '...my_anthropic_version...'
              azureApiVersion: '...my_azure_api_version...'
              azureDeploymentId: '...my_azure_deployment_id...'
              azureInstance: '...my_azure_instance...'
              bedrock:
                awsRegion: '...my_aws_region...'
              gemini:
                apiEndpoint: '...my_api_endpoint...'
                locationId: '...my_location_id...'
                projectId: '...my_project_id...'
              huggingface:
                useCache: false
                waitForModel: true
              inputCost: 6.37
              llama2Format: ollama
              maxTokens: 5
              mistralFormat: ollama
              outputCost: 8.25
              temperature: 0.7
              topK: 420
              topP: 0.54
              upstreamPath: '...my_upstream_path...'
              upstreamUrl: '...my_upstream_url...'
            provider: mistral
          routeType: preserve
        maxRequestBodySize: 7
        prompt: '...my_prompt...'
        transformationExtractPattern: '...my_transformation_extract_pattern...'
      consumerGroup:
        id: '...my_id...'
      controlPlaneId: 9524ec7d-36d9-465d-a8c5-83a3c9390458
      enabled: true
      gatewayPluginAiRequestTransformerId: '...my_id...'
      instanceName: '...my_instance_name...'
      ordering:
        after:
          accesses:
            - '...'
        before:
          accesses:
            - '...'
      protocols:
        - http
      route:
        id: '...my_id...'
      service:
        id: '...my_id...'
      tags:
        - '...'
Create GatewayPluginAiRequestTransformer Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new GatewayPluginAiRequestTransformer(name: string, args: GatewayPluginAiRequestTransformerArgs, opts?: CustomResourceOptions);
@overload
def GatewayPluginAiRequestTransformer(resource_name: str,
args: GatewayPluginAiRequestTransformerArgs,
opts: Optional[ResourceOptions] = None)
@overload
def GatewayPluginAiRequestTransformer(resource_name: str,
opts: Optional[ResourceOptions] = None,
config: Optional[GatewayPluginAiRequestTransformerConfigArgs] = None,
control_plane_id: Optional[str] = None,
consumer_group: Optional[GatewayPluginAiRequestTransformerConsumerGroupArgs] = None,
enabled: Optional[bool] = None,
gateway_plugin_ai_request_transformer_id: Optional[str] = None,
instance_name: Optional[str] = None,
ordering: Optional[GatewayPluginAiRequestTransformerOrderingArgs] = None,
protocols: Optional[Sequence[str]] = None,
route: Optional[GatewayPluginAiRequestTransformerRouteArgs] = None,
service: Optional[GatewayPluginAiRequestTransformerServiceArgs] = None,
tags: Optional[Sequence[str]] = None)
func NewGatewayPluginAiRequestTransformer(ctx *Context, name string, args GatewayPluginAiRequestTransformerArgs, opts ...ResourceOption) (*GatewayPluginAiRequestTransformer, error)
public GatewayPluginAiRequestTransformer(string name, GatewayPluginAiRequestTransformerArgs args, CustomResourceOptions? opts = null)
public GatewayPluginAiRequestTransformer(String name, GatewayPluginAiRequestTransformerArgs args)
public GatewayPluginAiRequestTransformer(String name, GatewayPluginAiRequestTransformerArgs args, CustomResourceOptions options)
type: konnect:GatewayPluginAiRequestTransformer
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args GatewayPluginAiRequestTransformerArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args GatewayPluginAiRequestTransformerArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args GatewayPluginAiRequestTransformerArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args GatewayPluginAiRequestTransformerArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args GatewayPluginAiRequestTransformerArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var gatewayPluginAiRequestTransformerResource = new Konnect.GatewayPluginAiRequestTransformer("gatewayPluginAiRequestTransformerResource", new()
{
Config = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigArgs
{
HttpProxyHost = "string",
HttpProxyPort = 0,
HttpTimeout = 0,
HttpsProxyHost = "string",
HttpsProxyPort = 0,
HttpsVerify = false,
Llm = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmArgs
{
Auth = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmAuthArgs
{
AllowOverride = false,
AwsAccessKeyId = "string",
AwsSecretAccessKey = "string",
AzureClientId = "string",
AzureClientSecret = "string",
AzureTenantId = "string",
AzureUseManagedIdentity = false,
GcpServiceAccountJson = "string",
GcpUseServiceAccount = false,
HeaderName = "string",
HeaderValue = "string",
ParamLocation = "string",
ParamName = "string",
ParamValue = "string",
},
Logging = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmLoggingArgs
{
LogPayloads = false,
LogStatistics = false,
},
Model = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmModelArgs
{
Name = "string",
Options = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs
{
AnthropicVersion = "string",
AzureApiVersion = "string",
AzureDeploymentId = "string",
AzureInstance = "string",
Bedrock = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs
{
AwsRegion = "string",
},
Gemini = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs
{
ApiEndpoint = "string",
LocationId = "string",
ProjectId = "string",
},
Huggingface = new Konnect.Inputs.GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs
{
UseCache = false,
WaitForModel = false,
},
InputCost = 0,
Llama2Format = "string",
MaxTokens = 0,
MistralFormat = "string",
OutputCost = 0,
Temperature = 0,
TopK = 0,
TopP = 0,
UpstreamPath = "string",
UpstreamUrl = "string",
},
Provider = "string",
},
RouteType = "string",
},
MaxRequestBodySize = 0,
Prompt = "string",
TransformationExtractPattern = "string",
},
ControlPlaneId = "string",
ConsumerGroup = new Konnect.Inputs.GatewayPluginAiRequestTransformerConsumerGroupArgs
{
Id = "string",
},
Enabled = false,
GatewayPluginAiRequestTransformerId = "string",
InstanceName = "string",
Ordering = new Konnect.Inputs.GatewayPluginAiRequestTransformerOrderingArgs
{
After = new Konnect.Inputs.GatewayPluginAiRequestTransformerOrderingAfterArgs
{
Accesses = new[]
{
"string",
},
},
Before = new Konnect.Inputs.GatewayPluginAiRequestTransformerOrderingBeforeArgs
{
Accesses = new[]
{
"string",
},
},
},
Protocols = new[]
{
"string",
},
Route = new Konnect.Inputs.GatewayPluginAiRequestTransformerRouteArgs
{
Id = "string",
},
Service = new Konnect.Inputs.GatewayPluginAiRequestTransformerServiceArgs
{
Id = "string",
},
Tags = new[]
{
"string",
},
});
// Reference example with placeholder values for all input properties.
// The nested args structs live in the same konnect SDK package, so they
// must be qualified as &konnect.<Type>{...}; the bare "&." qualifiers the
// generator emitted are not valid Go.
example, err := konnect.NewGatewayPluginAiRequestTransformer(ctx, "gatewayPluginAiRequestTransformerResource", &konnect.GatewayPluginAiRequestTransformerArgs{
	Config: &konnect.GatewayPluginAiRequestTransformerConfigArgs{
		HttpProxyHost:  pulumi.String("string"),
		HttpProxyPort:  pulumi.Float64(0),
		HttpTimeout:    pulumi.Float64(0),
		HttpsProxyHost: pulumi.String("string"),
		HttpsProxyPort: pulumi.Float64(0),
		HttpsVerify:    pulumi.Bool(false),
		Llm: &konnect.GatewayPluginAiRequestTransformerConfigLlmArgs{
			Auth: &konnect.GatewayPluginAiRequestTransformerConfigLlmAuthArgs{
				AllowOverride:           pulumi.Bool(false),
				AwsAccessKeyId:          pulumi.String("string"),
				AwsSecretAccessKey:      pulumi.String("string"),
				AzureClientId:           pulumi.String("string"),
				AzureClientSecret:       pulumi.String("string"),
				AzureTenantId:           pulumi.String("string"),
				AzureUseManagedIdentity: pulumi.Bool(false),
				GcpServiceAccountJson:   pulumi.String("string"),
				GcpUseServiceAccount:    pulumi.Bool(false),
				HeaderName:              pulumi.String("string"),
				HeaderValue:             pulumi.String("string"),
				ParamLocation:           pulumi.String("string"),
				ParamName:               pulumi.String("string"),
				ParamValue:              pulumi.String("string"),
			},
			Logging: &konnect.GatewayPluginAiRequestTransformerConfigLlmLoggingArgs{
				LogPayloads:   pulumi.Bool(false),
				LogStatistics: pulumi.Bool(false),
			},
			Model: &konnect.GatewayPluginAiRequestTransformerConfigLlmModelArgs{
				Name: pulumi.String("string"),
				Options: &konnect.GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs{
					AnthropicVersion:  pulumi.String("string"),
					AzureApiVersion:   pulumi.String("string"),
					AzureDeploymentId: pulumi.String("string"),
					AzureInstance:     pulumi.String("string"),
					Bedrock: &konnect.GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs{
						AwsRegion: pulumi.String("string"),
					},
					Gemini: &konnect.GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs{
						ApiEndpoint: pulumi.String("string"),
						LocationId:  pulumi.String("string"),
						ProjectId:   pulumi.String("string"),
					},
					Huggingface: &konnect.GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs{
						UseCache:     pulumi.Bool(false),
						WaitForModel: pulumi.Bool(false),
					},
					InputCost:     pulumi.Float64(0),
					Llama2Format:  pulumi.String("string"),
					MaxTokens:     pulumi.Float64(0),
					MistralFormat: pulumi.String("string"),
					OutputCost:    pulumi.Float64(0),
					Temperature:   pulumi.Float64(0),
					TopK:          pulumi.Float64(0),
					TopP:          pulumi.Float64(0),
					UpstreamPath:  pulumi.String("string"),
					UpstreamUrl:   pulumi.String("string"),
				},
				Provider: pulumi.String("string"),
			},
			RouteType: pulumi.String("string"),
		},
		MaxRequestBodySize:           pulumi.Float64(0),
		Prompt:                       pulumi.String("string"),
		TransformationExtractPattern: pulumi.String("string"),
	},
	ControlPlaneId: pulumi.String("string"),
	ConsumerGroup: &konnect.GatewayPluginAiRequestTransformerConsumerGroupArgs{
		Id: pulumi.String("string"),
	},
	Enabled:                             pulumi.Bool(false),
	GatewayPluginAiRequestTransformerId: pulumi.String("string"),
	InstanceName:                        pulumi.String("string"),
	Ordering: &konnect.GatewayPluginAiRequestTransformerOrderingArgs{
		After: &konnect.GatewayPluginAiRequestTransformerOrderingAfterArgs{
			Accesses: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		Before: &konnect.GatewayPluginAiRequestTransformerOrderingBeforeArgs{
			Accesses: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	Protocols: pulumi.StringArray{
		pulumi.String("string"),
	},
	Route: &konnect.GatewayPluginAiRequestTransformerRouteArgs{
		Id: pulumi.String("string"),
	},
	Service: &konnect.GatewayPluginAiRequestTransformerServiceArgs{
		Id: pulumi.String("string"),
	},
	Tags: pulumi.StringArray{
		pulumi.String("string"),
	},
})
var gatewayPluginAiRequestTransformerResource = new GatewayPluginAiRequestTransformer("gatewayPluginAiRequestTransformerResource", GatewayPluginAiRequestTransformerArgs.builder()
.config(GatewayPluginAiRequestTransformerConfigArgs.builder()
.httpProxyHost("string")
.httpProxyPort(0)
.httpTimeout(0)
.httpsProxyHost("string")
.httpsProxyPort(0)
.httpsVerify(false)
.llm(GatewayPluginAiRequestTransformerConfigLlmArgs.builder()
.auth(GatewayPluginAiRequestTransformerConfigLlmAuthArgs.builder()
.allowOverride(false)
.awsAccessKeyId("string")
.awsSecretAccessKey("string")
.azureClientId("string")
.azureClientSecret("string")
.azureTenantId("string")
.azureUseManagedIdentity(false)
.gcpServiceAccountJson("string")
.gcpUseServiceAccount(false)
.headerName("string")
.headerValue("string")
.paramLocation("string")
.paramName("string")
.paramValue("string")
.build())
.logging(GatewayPluginAiRequestTransformerConfigLlmLoggingArgs.builder()
.logPayloads(false)
.logStatistics(false)
.build())
.model(GatewayPluginAiRequestTransformerConfigLlmModelArgs.builder()
.name("string")
.options(GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs.builder()
.anthropicVersion("string")
.azureApiVersion("string")
.azureDeploymentId("string")
.azureInstance("string")
.bedrock(GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs.builder()
.awsRegion("string")
.build())
.gemini(GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs.builder()
.apiEndpoint("string")
.locationId("string")
.projectId("string")
.build())
.huggingface(GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs.builder()
.useCache(false)
.waitForModel(false)
.build())
.inputCost(0)
.llama2Format("string")
.maxTokens(0)
.mistralFormat("string")
.outputCost(0)
.temperature(0)
.topK(0)
.topP(0)
.upstreamPath("string")
.upstreamUrl("string")
.build())
.provider("string")
.build())
.routeType("string")
.build())
.maxRequestBodySize(0)
.prompt("string")
.transformationExtractPattern("string")
.build())
.controlPlaneId("string")
.consumerGroup(GatewayPluginAiRequestTransformerConsumerGroupArgs.builder()
.id("string")
.build())
.enabled(false)
.gatewayPluginAiRequestTransformerId("string")
.instanceName("string")
.ordering(GatewayPluginAiRequestTransformerOrderingArgs.builder()
.after(GatewayPluginAiRequestTransformerOrderingAfterArgs.builder()
.accesses("string")
.build())
.before(GatewayPluginAiRequestTransformerOrderingBeforeArgs.builder()
.accesses("string")
.build())
.build())
.protocols("string")
.route(GatewayPluginAiRequestTransformerRouteArgs.builder()
.id("string")
.build())
.service(GatewayPluginAiRequestTransformerServiceArgs.builder()
.id("string")
.build())
.tags("string")
.build());
gateway_plugin_ai_request_transformer_resource = konnect.GatewayPluginAiRequestTransformer("gatewayPluginAiRequestTransformerResource",
config={
"http_proxy_host": "string",
"http_proxy_port": 0,
"http_timeout": 0,
"https_proxy_host": "string",
"https_proxy_port": 0,
"https_verify": False,
"llm": {
"auth": {
"allow_override": False,
"aws_access_key_id": "string",
"aws_secret_access_key": "string",
"azure_client_id": "string",
"azure_client_secret": "string",
"azure_tenant_id": "string",
"azure_use_managed_identity": False,
"gcp_service_account_json": "string",
"gcp_use_service_account": False,
"header_name": "string",
"header_value": "string",
"param_location": "string",
"param_name": "string",
"param_value": "string",
},
"logging": {
"log_payloads": False,
"log_statistics": False,
},
"model": {
"name": "string",
"options": {
"anthropic_version": "string",
"azure_api_version": "string",
"azure_deployment_id": "string",
"azure_instance": "string",
"bedrock": {
"aws_region": "string",
},
"gemini": {
"api_endpoint": "string",
"location_id": "string",
"project_id": "string",
},
"huggingface": {
"use_cache": False,
"wait_for_model": False,
},
"input_cost": 0,
"llama2_format": "string",
"max_tokens": 0,
"mistral_format": "string",
"output_cost": 0,
"temperature": 0,
"top_k": 0,
"top_p": 0,
"upstream_path": "string",
"upstream_url": "string",
},
"provider": "string",
},
"route_type": "string",
},
"max_request_body_size": 0,
"prompt": "string",
"transformation_extract_pattern": "string",
},
control_plane_id="string",
consumer_group={
"id": "string",
},
enabled=False,
gateway_plugin_ai_request_transformer_id="string",
instance_name="string",
ordering={
"after": {
"accesses": ["string"],
},
"before": {
"accesses": ["string"],
},
},
protocols=["string"],
route={
"id": "string",
},
service={
"id": "string",
},
tags=["string"])
const gatewayPluginAiRequestTransformerResource = new konnect.GatewayPluginAiRequestTransformer("gatewayPluginAiRequestTransformerResource", {
config: {
httpProxyHost: "string",
httpProxyPort: 0,
httpTimeout: 0,
httpsProxyHost: "string",
httpsProxyPort: 0,
httpsVerify: false,
llm: {
auth: {
allowOverride: false,
awsAccessKeyId: "string",
awsSecretAccessKey: "string",
azureClientId: "string",
azureClientSecret: "string",
azureTenantId: "string",
azureUseManagedIdentity: false,
gcpServiceAccountJson: "string",
gcpUseServiceAccount: false,
headerName: "string",
headerValue: "string",
paramLocation: "string",
paramName: "string",
paramValue: "string",
},
logging: {
logPayloads: false,
logStatistics: false,
},
model: {
name: "string",
options: {
anthropicVersion: "string",
azureApiVersion: "string",
azureDeploymentId: "string",
azureInstance: "string",
bedrock: {
awsRegion: "string",
},
gemini: {
apiEndpoint: "string",
locationId: "string",
projectId: "string",
},
huggingface: {
useCache: false,
waitForModel: false,
},
inputCost: 0,
llama2Format: "string",
maxTokens: 0,
mistralFormat: "string",
outputCost: 0,
temperature: 0,
topK: 0,
topP: 0,
upstreamPath: "string",
upstreamUrl: "string",
},
provider: "string",
},
routeType: "string",
},
maxRequestBodySize: 0,
prompt: "string",
transformationExtractPattern: "string",
},
controlPlaneId: "string",
consumerGroup: {
id: "string",
},
enabled: false,
gatewayPluginAiRequestTransformerId: "string",
instanceName: "string",
ordering: {
after: {
accesses: ["string"],
},
before: {
accesses: ["string"],
},
},
protocols: ["string"],
route: {
id: "string",
},
service: {
id: "string",
},
tags: ["string"],
});
type: konnect:GatewayPluginAiRequestTransformer
properties:
config:
httpProxyHost: string
httpProxyPort: 0
httpTimeout: 0
httpsProxyHost: string
httpsProxyPort: 0
httpsVerify: false
llm:
auth:
allowOverride: false
awsAccessKeyId: string
awsSecretAccessKey: string
azureClientId: string
azureClientSecret: string
azureTenantId: string
azureUseManagedIdentity: false
gcpServiceAccountJson: string
gcpUseServiceAccount: false
headerName: string
headerValue: string
paramLocation: string
paramName: string
paramValue: string
logging:
logPayloads: false
logStatistics: false
model:
name: string
options:
anthropicVersion: string
azureApiVersion: string
azureDeploymentId: string
azureInstance: string
bedrock:
awsRegion: string
gemini:
apiEndpoint: string
locationId: string
projectId: string
huggingface:
useCache: false
waitForModel: false
inputCost: 0
llama2Format: string
maxTokens: 0
mistralFormat: string
outputCost: 0
temperature: 0
topK: 0
topP: 0
upstreamPath: string
upstreamUrl: string
provider: string
routeType: string
maxRequestBodySize: 0
prompt: string
transformationExtractPattern: string
consumerGroup:
id: string
controlPlaneId: string
enabled: false
gatewayPluginAiRequestTransformerId: string
instanceName: string
ordering:
after:
accesses:
- string
before:
accesses:
- string
protocols:
- string
route:
id: string
service:
id: string
tags:
- string
GatewayPluginAiRequestTransformer Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The GatewayPluginAiRequestTransformer resource accepts the following input properties:
- Config
Gateway
Plugin Ai Request Transformer Config - Control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- Consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- Enabled bool
- Whether the plugin is applied.
- Gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- Instance
Name string - Ordering
Gateway
Plugin Ai Request Transformer Ordering - Protocols List<string>
- A set of strings representing HTTP protocols.
- Route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- Service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- Tags List<string>
- An optional set of strings associated with the Plugin for grouping and filtering.
- Config
Gateway
Plugin Ai Request Transformer Config Args - Control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- Consumer
Group GatewayPlugin Ai Request Transformer Consumer Group Args - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- Enabled bool
- Whether the plugin is applied.
- Gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- Instance
Name string - Ordering
Gateway
Plugin Ai Request Transformer Ordering Args - Protocols []string
- A set of strings representing HTTP protocols.
- Route
Gateway
Plugin Ai Request Transformer Route Args - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- Service
Gateway
Plugin Ai Request Transformer Service Args - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- Tags []string
- An optional set of strings associated with the Plugin for grouping and filtering.
- config
Gateway
Plugin Ai Request Transformer Config - control
Plane StringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- enabled Boolean
- Whether the plugin is applied.
- gateway
Plugin StringAi Request Transformer Id - The ID of this resource.
- instance
Name String - ordering
Gateway
Plugin Ai Request Transformer Ordering - protocols List<String>
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- tags List<String>
- An optional set of strings associated with the Plugin for grouping and filtering.
- config
Gateway
Plugin Ai Request Transformer Config - control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- enabled boolean
- Whether the plugin is applied.
- gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- instance
Name string - ordering
Gateway
Plugin Ai Request Transformer Ordering - protocols string[]
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- tags string[]
- An optional set of strings associated with the Plugin for grouping and filtering.
- config
Gateway
Plugin Ai Request Transformer Config Args - control_
plane_ strid - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- consumer_
group GatewayPlugin Ai Request Transformer Consumer Group Args - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- enabled bool
- Whether the plugin is applied.
- gateway_
plugin_ strai_ request_ transformer_ id - The ID of this resource.
- instance_
name str - ordering
Gateway
Plugin Ai Request Transformer Ordering Args - protocols Sequence[str]
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route Args - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service Args - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- tags Sequence[str]
- An optional set of strings associated with the Plugin for grouping and filtering.
- config Property Map
- control
Plane StringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- consumer
Group Property Map - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins can not be restricted to consumers groups this way.). Leave unset for the plugin to activate regardless of the authenticated Consumer Groups
- enabled Boolean
- Whether the plugin is applied.
- gateway
Plugin StringAi Request Transformer Id - The ID of this resource.
- instance
Name String - ordering Property Map
- protocols List<String>
- A set of strings representing HTTP protocols.
- route Property Map
- If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service Property Map
- If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- tags List<String>
- An optional set of strings associated with the Plugin for grouping and filtering.
Outputs
All input properties are implicitly available as output properties. Additionally, the GatewayPluginAiRequestTransformer resource produces the following output properties:
- created_
at float - Unix epoch when the resource was created.
- id str
- The provider-assigned unique ID for this managed resource.
- updated_
at float - Unix epoch when the resource was last updated.
Look up Existing GatewayPluginAiRequestTransformer Resource
Get an existing GatewayPluginAiRequestTransformer resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: GatewayPluginAiRequestTransformerState, opts?: CustomResourceOptions): GatewayPluginAiRequestTransformer
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
config: Optional[GatewayPluginAiRequestTransformerConfigArgs] = None,
consumer_group: Optional[GatewayPluginAiRequestTransformerConsumerGroupArgs] = None,
control_plane_id: Optional[str] = None,
created_at: Optional[float] = None,
enabled: Optional[bool] = None,
gateway_plugin_ai_request_transformer_id: Optional[str] = None,
instance_name: Optional[str] = None,
ordering: Optional[GatewayPluginAiRequestTransformerOrderingArgs] = None,
protocols: Optional[Sequence[str]] = None,
route: Optional[GatewayPluginAiRequestTransformerRouteArgs] = None,
service: Optional[GatewayPluginAiRequestTransformerServiceArgs] = None,
tags: Optional[Sequence[str]] = None,
updated_at: Optional[float] = None) -> GatewayPluginAiRequestTransformer
func GetGatewayPluginAiRequestTransformer(ctx *Context, name string, id IDInput, state *GatewayPluginAiRequestTransformerState, opts ...ResourceOption) (*GatewayPluginAiRequestTransformer, error)
public static GatewayPluginAiRequestTransformer Get(string name, Input<string> id, GatewayPluginAiRequestTransformerState? state, CustomResourceOptions? opts = null)
public static GatewayPluginAiRequestTransformer get(String name, Output<String> id, GatewayPluginAiRequestTransformerState state, CustomResourceOptions options)
resources: _: type: konnect:GatewayPluginAiRequestTransformer get: id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Config
Gateway
Plugin Ai Request Transformer Config - Consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- Control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- Created
At double - Unix epoch when the resource was created.
- Enabled bool
- Whether the plugin is applied.
- Gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- Instance
Name string - Ordering
Gateway
Plugin Ai Request Transformer Ordering - Protocols List<string>
- A set of strings representing HTTP protocols.
- Route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- Service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- List<string>
- An optional set of strings associated with the Plugin for grouping and filtering.
- Updated
At double - Unix epoch when the resource was last updated.
- Config
Gateway
Plugin Ai Request Transformer Config Args - Consumer
Group GatewayPlugin Ai Request Transformer Consumer Group Args - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- Control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- Created
At float64 - Unix epoch when the resource was created.
- Enabled bool
- Whether the plugin is applied.
- Gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- Instance
Name string - Ordering
Gateway
Plugin Ai Request Transformer Ordering Args - Protocols []string
- A set of strings representing HTTP protocols.
- Route
Gateway
Plugin Ai Request Transformer Route Args - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- Service
Gateway
Plugin Ai Request Transformer Service Args - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- []string
- An optional set of strings associated with the Plugin for grouping and filtering.
- Updated
At float64 - Unix epoch when the resource was last updated.
- config
Gateway
Plugin Ai Request Transformer Config - consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- control
Plane StringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- created
At Double - Unix epoch when the resource was created.
- enabled Boolean
- Whether the plugin is applied.
- gateway
Plugin StringAi Request Transformer Id - The ID of this resource.
- instance
Name String - ordering
Gateway
Plugin Ai Request Transformer Ordering - protocols List<String>
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- List<String>
- An optional set of strings associated with the Plugin for grouping and filtering.
- updated
At Double - Unix epoch when the resource was last updated.
- config
Gateway
Plugin Ai Request Transformer Config - consumer
Group GatewayPlugin Ai Request Transformer Consumer Group - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- control
Plane stringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- created
At number - Unix epoch when the resource was created.
- enabled boolean
- Whether the plugin is applied.
- gateway
Plugin stringAi Request Transformer Id - The ID of this resource.
- instance
Name string - ordering
Gateway
Plugin Ai Request Transformer Ordering - protocols string[]
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- string[]
- An optional set of strings associated with the Plugin for grouping and filtering.
- updated
At number - Unix epoch when the resource was last updated.
- config
Gateway
Plugin Ai Request Transformer Config Args - consumer_
group GatewayPlugin Ai Request Transformer Consumer Group Args - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- control_
plane_ strid - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- created_
at float - Unix epoch when the resource was created.
- enabled bool
- Whether the plugin is applied.
- gateway_
plugin_ strai_ request_ transformer_ id - The ID of this resource.
- instance_
name str - ordering
Gateway
Plugin Ai Request Transformer Ordering Args - protocols Sequence[str]
- A set of strings representing HTTP protocols.
- route
Gateway
Plugin Ai Request Transformer Route Args - If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service
Gateway
Plugin Ai Request Transformer Service Args - If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- Sequence[str]
- An optional set of strings associated with the Plugin for grouping and filtering.
- updated_
at float - Unix epoch when the resource was last updated.
- config Property Map
- consumer
Group Property Map - If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
- control
Plane StringId - The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
- created
At Number - Unix epoch when the resource was created.
- enabled Boolean
- Whether the plugin is applied.
- gateway
Plugin StringAi Request Transformer Id - The ID of this resource.
- instance
Name String - ordering Property Map
- protocols List<String>
- A set of strings representing HTTP protocols.
- route Property Map
- If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
- service Property Map
- If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
- List<String>
- An optional set of strings associated with the Plugin for grouping and filtering.
- updated
At Number - Unix epoch when the resource was last updated.
Supporting Types
GatewayPluginAiRequestTransformerConfig, GatewayPluginAiRequestTransformerConfigArgs
- Http
Proxy stringHost - A string representing a host name, such as example.com.
- Http
Proxy doublePort - An integer representing a port number between 0 and 65535, inclusive.
- Http
Timeout double - Timeout in milliseconds for the AI upstream service.
- Https
Proxy stringHost - A string representing a host name, such as example.com.
- Https
Proxy doublePort - An integer representing a port number between 0 and 65535, inclusive.
- Https
Verify bool - Verify the TLS certificate of the AI upstream service.
- Llm
Gateway
Plugin Ai Request Transformer Config Llm - Max
Request doubleBody Size - Maximum allowed body size to be introspected.
- Prompt string
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- Transformation
Extract stringPattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
- Http
Proxy stringHost - A string representing a host name, such as example.com.
- Http
Proxy float64Port - An integer representing a port number between 0 and 65535, inclusive.
- Http
Timeout float64 - Timeout in milliseconds for the AI upstream service.
- Https
Proxy stringHost - A string representing a host name, such as example.com.
- Https
Proxy float64Port - An integer representing a port number between 0 and 65535, inclusive.
- Https
Verify bool - Verify the TLS certificate of the AI upstream service.
- Llm
Gateway
Plugin Ai Request Transformer Config Llm - Max
Request float64Body Size - Maximum allowed body size to be introspected.
- Prompt string
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- Transformation
Extract stringPattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
- http
Proxy StringHost - A string representing a host name, such as example.com.
- http
Proxy DoublePort - An integer representing a port number between 0 and 65535, inclusive.
- http
Timeout Double - Timeout in milliseconds for the AI upstream service.
- https
Proxy StringHost - A string representing a host name, such as example.com.
- https
Proxy DoublePort - An integer representing a port number between 0 and 65535, inclusive.
- https
Verify Boolean - Verify the TLS certificate of the AI upstream service.
- llm
Gateway
Plugin Ai Request Transformer Config Llm - max
Request DoubleBody Size - Maximum allowed body size to be introspected.
- prompt String
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- transformation
Extract StringPattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
- http
Proxy stringHost - A string representing a host name, such as example.com.
- http
Proxy numberPort - An integer representing a port number between 0 and 65535, inclusive.
- http
Timeout number - Timeout in milliseconds for the AI upstream service.
- https
Proxy stringHost - A string representing a host name, such as example.com.
- https
Proxy numberPort - An integer representing a port number between 0 and 65535, inclusive.
- https
Verify boolean - Verify the TLS certificate of the AI upstream service.
- llm
Gateway
Plugin Ai Request Transformer Config Llm - max
Request numberBody Size - Maximum allowed body size to be introspected.
- prompt string
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- transformation
Extract stringPattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
- http_
proxy_ strhost - A string representing a host name, such as example.com.
- http_
proxy_ floatport - An integer representing a port number between 0 and 65535, inclusive.
- http_
timeout float - Timeout in milliseconds for the AI upstream service.
- https_
proxy_ strhost - A string representing a host name, such as example.com.
- https_
proxy_ floatport - An integer representing a port number between 0 and 65535, inclusive.
- https_
verify bool - Verify the TLS certificate of the AI upstream service.
- llm
Gateway
Plugin Ai Request Transformer Config Llm - max_
request_ floatbody_ size - Maximum allowed body size to be introspected.
- prompt str
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- transformation_
extract_ strpattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
- http
Proxy StringHost - A string representing a host name, such as example.com.
- http
Proxy NumberPort - An integer representing a port number between 0 and 65535, inclusive.
- http
Timeout Number - Timeout in milliseconds for the AI upstream service.
- https
Proxy StringHost - A string representing a host name, such as example.com.
- https
Proxy NumberPort - An integer representing a port number between 0 and 65535, inclusive.
- https
Verify Boolean - Verify the TLS certificate of the AI upstream service.
- llm Property Map
- max
Request NumberBody Size - Maximum allowed body size to be introspected.
- prompt String
- Use this prompt to tune the LLM system/assistant message for the incoming proxy request (from the client), and what you are expecting in return.
- transformation
Extract StringPattern - Defines the regular expression that must match to indicate a successful AI transformation at the request phase. The first match will be set as the outgoing body. If the AI service's response doesn't match this pattern, it is marked as a failure.
GatewayPluginAiRequestTransformerConfigLlm, GatewayPluginAiRequestTransformerConfigLlmArgs
- Auth
Gateway
Plugin Ai Request Transformer Config Llm Auth - Logging
Gateway
Plugin Ai Request Transformer Config Llm Logging - Model
Gateway
Plugin Ai Request Transformer Config Llm Model - Route
Type string - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
- Auth
Gateway
Plugin Ai Request Transformer Config Llm Auth - Logging
Gateway
Plugin Ai Request Transformer Config Llm Logging - Model
Gateway
Plugin Ai Request Transformer Config Llm Model - Route
Type string - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
- auth
Gateway
Plugin Ai Request Transformer Config Llm Auth - logging
Gateway
Plugin Ai Request Transformer Config Llm Logging - model
Gateway
Plugin Ai Request Transformer Config Llm Model - route
Type String - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
- auth
Gateway
Plugin Ai Request Transformer Config Llm Auth - logging
Gateway
Plugin Ai Request Transformer Config Llm Logging - model
Gateway
Plugin Ai Request Transformer Config Llm Model - route
Type string - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
- auth
Gateway
Plugin Ai Request Transformer Config Llm Auth - logging
Gateway
Plugin Ai Request Transformer Config Llm Logging - model
Gateway
Plugin Ai Request Transformer Config Llm Model - route_
type str - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
- auth Property Map
- logging Property Map
- model Property Map
- route
Type String - The model's operation implementation, for this provider. Set to
preserve
to pass through without transformation. must be one of ["llm/v1/chat", "llm/v1/completions", "preserve"]
GatewayPluginAiRequestTransformerConfigLlmAuth, GatewayPluginAiRequestTransformerConfigLlmAuthArgs
- Allow
Override bool - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- Aws
Access stringKey Id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
- Aws
Secret stringAccess Key - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- Azure
Client stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- Azure
Client stringSecret - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- Azure
Tenant stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- Azure
Use boolManaged Identity - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- Gcp
Service stringAccount Json - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
. - Gcp
Use boolService Account - Use service account auth for GCP-based providers and models.
- Header
Name string - If AI model requires authentication via Authorization or API key header, specify its name here.
- Header
Value string - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- Param
Location string - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
- Param
Name string - If AI model requires authentication via query parameter, specify its name here.
- Param
Value string - Specify the full parameter value for 'param_name'.
- Allow
Override bool - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- Aws
Access stringKey Id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
- Aws
Secret stringAccess Key - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- Azure
Client stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- Azure
Client stringSecret - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- Azure
Tenant stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- Azure
Use boolManaged Identity - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- Gcp
Service stringAccount Json - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
. - Gcp
Use boolService Account - Use service account auth for GCP-based providers and models.
- Header
Name string - If AI model requires authentication via Authorization or API key header, specify its name here.
- Header
Value string - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- Param
Location string - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
- Param
Name string - If AI model requires authentication via query parameter, specify its name here.
- Param
Value string - Specify the full parameter value for 'param_name'.
- allow
Override Boolean - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- aws
Access StringKey Id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
- aws
Secret StringAccess Key - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- azure
Client StringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- azure
Client StringSecret - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- azure
Tenant StringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- azure
Use BooleanManaged Identity - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- gcp
Service StringAccount Json - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
. - gcp
Use BooleanService Account - Use service account auth for GCP-based providers and models.
- header
Name String - If AI model requires authentication via Authorization or API key header, specify its name here.
- header
Value String - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- param
Location String - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
- param
Name String - If AI model requires authentication via query parameter, specify its name here.
- param
Value String - Specify the full parameter value for 'param_name'.
- allow
Override boolean - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- aws
Access stringKey Id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
- aws
Secret stringAccess Key - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- azure
Client stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- azure
Client stringSecret - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- azure
Tenant stringId - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- azure
Use booleanManaged Identity - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- gcp
Service stringAccount Json - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
. - gcp
Use booleanService Account - Use service account auth for GCP-based providers and models.
- header
Name string - If AI model requires authentication via Authorization or API key header, specify its name here.
- header
Value string - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- param
Location string - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
- param
Name string - If AI model requires authentication via query parameter, specify its name here.
- param
Value string - Specify the full parameter value for 'param_name'.
- allow_
override bool - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- aws_
access_ strkey_ id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
- aws_
secret_ straccess_ key - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- azure_
client_ strid - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- azure_
client_ strsecret - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- azure_
tenant_ strid - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- azure_
use_ boolmanaged_ identity - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- gcp_
service_ straccount_ json - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
. - gcp_
use_ boolservice_ account - Use service account auth for GCP-based providers and models.
- header_
name str - If AI model requires authentication via Authorization or API key header, specify its name here.
- header_
value str - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- param_
location str - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
- param_
name str - If AI model requires authentication via query parameter, specify its name here.
- param_
value str - Specify the full parameter value for 'param_name'.
- allow
Override Boolean - If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin.
- aws
Access StringKey Id - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWSACCESSKEY_ID environment variable for this plugin instance.
- awsSecretAccessKey String - Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
- azureClientId String - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
- azureClientSecret String - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
- azureTenantId String - If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
- azureUseManagedIdentity Boolean - Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models.
- gcpServiceAccountJson String - Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable
GCP_SERVICE_ACCOUNT
.
- gcpUseServiceAccount Boolean - Use service account auth for GCP-based providers and models.
- headerName String - If AI model requires authentication via Authorization or API key header, specify its name here.
- headerValue String - Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
- paramLocation String - Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
- paramName String - If AI model requires authentication via query parameter, specify its name here.
- paramValue String - Specify the full parameter value for 'param_name'.
GatewayPluginAiRequestTransformerConfigLlmLogging, GatewayPluginAiRequestTransformerConfigLlmLoggingArgs
- Log
Payloads bool - If enabled, will log the request and response body into the Kong log plugin(s) output.
- Log
Statistics bool - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
- Log
Payloads bool - If enabled, will log the request and response body into the Kong log plugin(s) output.
- Log
Statistics bool - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
- log
Payloads Boolean - If enabled, will log the request and response body into the Kong log plugin(s) output.
- log
Statistics Boolean - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
- log
Payloads boolean - If enabled, will log the request and response body into the Kong log plugin(s) output.
- log
Statistics boolean - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
- log_
payloads bool - If enabled, will log the request and response body into the Kong log plugin(s) output.
- log_
statistics bool - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
- log
Payloads Boolean - If enabled, will log the request and response body into the Kong log plugin(s) output.
- log
Statistics Boolean - If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output.
GatewayPluginAiRequestTransformerConfigLlmModel, GatewayPluginAiRequestTransformerConfigLlmModelArgs
- Name string
- Model name to execute.
- Options
Gateway
Plugin Ai Request Transformer Config Llm Model Options - Key/value settings for the model
- Provider string
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
- Name string
- Model name to execute.
- Options
Gateway
Plugin Ai Request Transformer Config Llm Model Options - Key/value settings for the model
- Provider string
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
- name String
- Model name to execute.
- options
Gateway
Plugin Ai Request Transformer Config Llm Model Options - Key/value settings for the model
- provider String
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
- name string
- Model name to execute.
- options
Gateway
Plugin Ai Request Transformer Config Llm Model Options - Key/value settings for the model
- provider string
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
- name str
- Model name to execute.
- options
Gateway
Plugin Ai Request Transformer Config Llm Model Options - Key/value settings for the model
- provider str
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
- name String
- Model name to execute.
- options Property Map
- Key/value settings for the model
- provider String
- AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
GatewayPluginAiRequestTransformerConfigLlmModelOptions, GatewayPluginAiRequestTransformerConfigLlmModelOptionsArgs
- Anthropic
Version string - Defines the schema/API version, if using Anthropic provider.
- AzureApiVersion string - 'api-version' for Azure OpenAI instances.
- AzureDeploymentId string - Deployment ID for Azure OpenAI instances.
- Azure
Instance string - Instance name for Azure OpenAI hosted models.
- Bedrock
Gateway
Plugin Ai Request Transformer Config Llm Model Options Bedrock - Gemini
Gateway
Plugin Ai Request Transformer Config Llm Model Options Gemini - Huggingface
Gateway
Plugin Ai Request Transformer Config Llm Model Options Huggingface - Input
Cost double - Defines the cost per 1M tokens in your prompt.
- Llama2Format string
- If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- Max
Tokens double - Defines the max_tokens, if using chat or completion models.
- Mistral
Format string - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- Output
Cost double - Defines the cost per 1M tokens in the output of the AI.
- Temperature double
- Defines the matching temperature, if using chat or completion models.
- Top
K double - Defines the top-k most likely tokens, if supported.
- Top
P double - Defines the top-p probability mass, if supported.
- Upstream
Path string - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- Upstream
Url string - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
- Anthropic
Version string - Defines the schema/API version, if using Anthropic provider.
- AzureApiVersion string - 'api-version' for Azure OpenAI instances.
- AzureDeploymentId string - Deployment ID for Azure OpenAI instances.
- Azure
Instance string - Instance name for Azure OpenAI hosted models.
- Bedrock
Gateway
Plugin Ai Request Transformer Config Llm Model Options Bedrock - Gemini
Gateway
Plugin Ai Request Transformer Config Llm Model Options Gemini - Huggingface
Gateway
Plugin Ai Request Transformer Config Llm Model Options Huggingface - Input
Cost float64 - Defines the cost per 1M tokens in your prompt.
- Llama2Format string
- If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- Max
Tokens float64 - Defines the max_tokens, if using chat or completion models.
- Mistral
Format string - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- Output
Cost float64 - Defines the cost per 1M tokens in the output of the AI.
- Temperature float64
- Defines the matching temperature, if using chat or completion models.
- Top
K float64 - Defines the top-k most likely tokens, if supported.
- Top
P float64 - Defines the top-p probability mass, if supported.
- Upstream
Path string - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- Upstream
Url string - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
- anthropic
Version String - Defines the schema/API version, if using Anthropic provider.
- azureApiVersion String - 'api-version' for Azure OpenAI instances.
- azureDeploymentId String - Deployment ID for Azure OpenAI instances.
- azure
Instance String - Instance name for Azure OpenAI hosted models.
- bedrock
Gateway
Plugin Ai Request Transformer Config Llm Model Options Bedrock - gemini
Gateway
Plugin Ai Request Transformer Config Llm Model Options Gemini - huggingface
Gateway
Plugin Ai Request Transformer Config Llm Model Options Huggingface - input
Cost Double - Defines the cost per 1M tokens in your prompt.
- llama2Format String
- If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- max
Tokens Double - Defines the max_tokens, if using chat or completion models.
- mistral
Format String - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- output
Cost Double - Defines the cost per 1M tokens in the output of the AI.
- temperature Double
- Defines the matching temperature, if using chat or completion models.
- top
K Double - Defines the top-k most likely tokens, if supported.
- top
P Double - Defines the top-p probability mass, if supported.
- upstream
Path String - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- upstream
Url String - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
- anthropic
Version string - Defines the schema/API version, if using Anthropic provider.
- azureApiVersion string - 'api-version' for Azure OpenAI instances.
- azureDeploymentId string - Deployment ID for Azure OpenAI instances.
- azure
Instance string - Instance name for Azure OpenAI hosted models.
- bedrock
Gateway
Plugin Ai Request Transformer Config Llm Model Options Bedrock - gemini
Gateway
Plugin Ai Request Transformer Config Llm Model Options Gemini - huggingface
Gateway
Plugin Ai Request Transformer Config Llm Model Options Huggingface - input
Cost number - Defines the cost per 1M tokens in your prompt.
- llama2Format string
- If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- max
Tokens number - Defines the max_tokens, if using chat or completion models.
- mistral
Format string - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- output
Cost number - Defines the cost per 1M tokens in the output of the AI.
- temperature number
- Defines the matching temperature, if using chat or completion models.
- top
K number - Defines the top-k most likely tokens, if supported.
- top
P number - Defines the top-p probability mass, if supported.
- upstream
Path string - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- upstream
Url string - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
- anthropic_
version str - Defines the schema/API version, if using Anthropic provider.
- azure_api_version str - 'api-version' for Azure OpenAI instances.
- azure_deployment_id str - Deployment ID for Azure OpenAI instances.
- azure_
instance str - Instance name for Azure OpenAI hosted models.
- bedrock
Gateway
Plugin Ai Request Transformer Config Llm Model Options Bedrock - gemini
Gateway
Plugin Ai Request Transformer Config Llm Model Options Gemini - huggingface
Gateway
Plugin Ai Request Transformer Config Llm Model Options Huggingface - input_
cost float - Defines the cost per 1M tokens in your prompt.
- llama2_
format str - If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- max_
tokens float - Defines the max_tokens, if using chat or completion models.
- mistral_
format str - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- output_
cost float - Defines the cost per 1M tokens in the output of the AI.
- temperature float
- Defines the matching temperature, if using chat or completion models.
- top_
k float - Defines the top-k most likely tokens, if supported.
- top_
p float - Defines the top-p probability mass, if supported.
- upstream_
path str - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- upstream_
url str - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
- anthropic
Version String - Defines the schema/API version, if using Anthropic provider.
- azureApiVersion String - 'api-version' for Azure OpenAI instances.
- azureDeploymentId String - Deployment ID for Azure OpenAI instances.
- azure
Instance String - Instance name for Azure OpenAI hosted models.
- bedrock Property Map
- gemini Property Map
- huggingface Property Map
- input
Cost Number - Defines the cost per 1M tokens in your prompt.
- llama2Format String
- If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
- max
Tokens Number - Defines the max_tokens, if using chat or completion models.
- mistral
Format String - If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
- output
Cost Number - Defines the cost per 1M tokens in the output of the AI.
- temperature Number
- Defines the matching temperature, if using chat or completion models.
- top
K Number - Defines the top-k most likely tokens, if supported.
- top
P Number - Defines the top-p probability mass, if supported.
- upstream
Path String - Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
- upstream
Url String - Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrock, GatewayPluginAiRequestTransformerConfigLlmModelOptionsBedrockArgs
- Aws
Region string - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
- Aws
Region string - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
- aws
Region String - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
- aws
Region string - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
- aws_
region str - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
- aws
Region String - If using AWS providers (Bedrock) you can override the
AWS_REGION
environment variable by setting this option.
GatewayPluginAiRequestTransformerConfigLlmModelOptionsGemini, GatewayPluginAiRequestTransformerConfigLlmModelOptionsGeminiArgs
- Api
Endpoint string - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- Location
Id string - If running Gemini on Vertex, specify the location ID.
- Project
Id string - If running Gemini on Vertex, specify the project ID.
- Api
Endpoint string - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- Location
Id string - If running Gemini on Vertex, specify the location ID.
- Project
Id string - If running Gemini on Vertex, specify the project ID.
- api
Endpoint String - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- location
Id String - If running Gemini on Vertex, specify the location ID.
- project
Id String - If running Gemini on Vertex, specify the project ID.
- api
Endpoint string - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- location
Id string - If running Gemini on Vertex, specify the location ID.
- project
Id string - If running Gemini on Vertex, specify the project ID.
- api_
endpoint str - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- location_
id str - If running Gemini on Vertex, specify the location ID.
- project_
id str - If running Gemini on Vertex, specify the project ID.
- api
Endpoint String - If running Gemini on Vertex, specify the regional API endpoint (hostname only).
- location
Id String - If running Gemini on Vertex, specify the location ID.
- project
Id String - If running Gemini on Vertex, specify the project ID.
GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingface, GatewayPluginAiRequestTransformerConfigLlmModelOptionsHuggingfaceArgs
- Use
Cache bool - Use the cache layer on the inference API
- WaitForModel bool - Wait for the model if it is not ready
- Use
Cache bool - Use the cache layer on the inference API
- WaitForModel bool - Wait for the model if it is not ready
- use
Cache Boolean - Use the cache layer on the inference API
- waitForModel Boolean - Wait for the model if it is not ready
- use
Cache boolean - Use the cache layer on the inference API
- waitForModel boolean - Wait for the model if it is not ready
- use_
cache bool - Use the cache layer on the inference API
- wait_for_model bool - Wait for the model if it is not ready
- use
Cache Boolean - Use the cache layer on the inference API
- waitForModel Boolean - Wait for the model if it is not ready
GatewayPluginAiRequestTransformerConsumerGroup, GatewayPluginAiRequestTransformerConsumerGroupArgs
- Id string
- Id string
- id String
- id string
- id str
- id String
GatewayPluginAiRequestTransformerOrdering, GatewayPluginAiRequestTransformerOrderingArgs
GatewayPluginAiRequestTransformerOrderingAfter, GatewayPluginAiRequestTransformerOrderingAfterArgs
- Accesses List<string>
- Accesses []string
- accesses List<String>
- accesses string[]
- accesses Sequence[str]
- accesses List<String>
GatewayPluginAiRequestTransformerOrderingBefore, GatewayPluginAiRequestTransformerOrderingBeforeArgs
- Accesses List<string>
- Accesses []string
- accesses List<String>
- accesses string[]
- accesses Sequence[str]
- accesses List<String>
GatewayPluginAiRequestTransformerRoute, GatewayPluginAiRequestTransformerRouteArgs
- Id string
- Id string
- id String
- id string
- id str
- id String
GatewayPluginAiRequestTransformerService, GatewayPluginAiRequestTransformerServiceArgs
- Id string
- Id string
- id String
- id string
- id str
- id String
Import
$ pulumi import konnect:index/gatewayPluginAiRequestTransformer:GatewayPluginAiRequestTransformer my_konnect_gateway_plugin_ai_request_transformer "{ \"control_plane_id\": \"9524ec7d-36d9-465d-a8c5-83a3c9390458\", \"plugin_id\": \"3473c251-5b6c-4f45-b1ff-7ede735a366d\"}"
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- konnect kong/terraform-provider-konnect
- License
- Notes
- This Pulumi package is based on the
konnect
Terraform Provider.