Commit 2d1ada9

Update default AWS region in BedrockClientSettings to us-west-2
1 parent 4617f8a commit 2d1ada9

5 files changed: +48 additions, -73 deletions

integration-tests/src/jvmTest/kotlin/ai/koog/integration/tests/MultipleLLMPromptExecutorIntegrationTest.kt

Lines changed: 46 additions & 1 deletion
@@ -17,10 +17,13 @@ import ai.koog.integration.tests.utils.TestUtils
 import ai.koog.integration.tests.utils.TestUtils.readTestAnthropicKeyFromEnv
 import ai.koog.integration.tests.utils.TestUtils.readTestGoogleAIKeyFromEnv
 import ai.koog.integration.tests.utils.TestUtils.readTestOpenAIKeyFromEnv
+import ai.koog.integration.tests.utils.annotations.Retry
 import ai.koog.prompt.dsl.ModerationCategory
 import ai.koog.prompt.dsl.prompt
 import ai.koog.prompt.executor.clients.anthropic.AnthropicLLMClient
+import ai.koog.prompt.executor.clients.anthropic.AnthropicModels
 import ai.koog.prompt.executor.clients.google.GoogleLLMClient
+import ai.koog.prompt.executor.clients.google.GoogleModels
 import ai.koog.prompt.executor.clients.openai.OpenAILLMClient
 import ai.koog.prompt.executor.clients.openai.OpenAIModels
 import ai.koog.prompt.executor.llms.MultiLLMPromptExecutor
@@ -35,6 +38,7 @@ import ai.koog.prompt.message.AttachmentContent
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.params.LLMParams.ToolChoice
 import kotlinx.coroutines.flow.toList
+import kotlinx.coroutines.runBlocking
 import kotlinx.coroutines.test.runTest
 import org.junit.jupiter.api.Assumptions.assumeTrue
 import org.junit.jupiter.api.BeforeAll
@@ -43,7 +47,7 @@ import org.junit.jupiter.params.provider.Arguments
 import org.junit.jupiter.params.provider.MethodSource
 import java.nio.file.Path
 import java.nio.file.Paths
-import java.util.*
+import java.util.Base64
 import java.util.stream.Stream
 import kotlin.io.path.pathString
 import kotlin.io.path.readBytes
@@ -108,6 +112,7 @@ class MultipleLLMPromptExecutorIntegrationTest {
     private val geminiApiKey: String get() = readTestGoogleAIKeyFromEnv()
     private val openAIApiKey: String get() = readTestOpenAIKeyFromEnv()
     private val anthropicApiKey: String get() = readTestAnthropicKeyFromEnv()
+    private val googleApiKey: String get() = readTestGoogleAIKeyFromEnv()
 
     // LLM clients
     private val openAIClient get() = OpenAILLMClient(openAIApiKey)
@@ -1135,4 +1140,44 @@ class MultipleLLMPromptExecutorIntegrationTest {
             )
         ) { "Violence must be detected!" }
     }
+
+    @Retry
+    @Test
+    fun integration_testMultipleSystemMessages() = runBlocking {
+        Models.assumeAvailable(LLMProvider.OpenAI)
+        Models.assumeAvailable(LLMProvider.Anthropic)
+        Models.assumeAvailable(LLMProvider.Google)
+
+        val openAIClient = OpenAILLMClient(openAIApiKey)
+        val anthropicClient = AnthropicLLMClient(anthropicApiKey)
+        val googleClient = GoogleLLMClient(googleApiKey)
+
+        val executor = MultiLLMPromptExecutor(
+            LLMProvider.OpenAI to openAIClient,
+            LLMProvider.Anthropic to anthropicClient,
+            LLMProvider.Google to googleClient
+        )
+
+        val prompt = prompt("multiple-system-messages-test") {
+            system("You are a helpful assistant.")
+            user("Hi")
+            system("You can handle multiple system messages.")
+            user("Respond with a short message.")
+        }
+
+        val modelOpenAI = OpenAIModels.CostOptimized.GPT4oMini
+        val modelAnthropic = AnthropicModels.Haiku_3_5
+        val modelGemini = GoogleModels.Gemini2_0Flash
+
+        val responseOpenAI = executor.execute(prompt, modelOpenAI)
+        val responseAnthropic = executor.execute(prompt, modelAnthropic)
+        val responseGemini = executor.execute(prompt, modelGemini)
+
+        assertTrue(responseOpenAI.content.isNotEmpty(), "OpenAI response should not be empty")
+        assertTrue(responseAnthropic.content.isNotEmpty(), "Anthropic response should not be empty")
+        assertTrue(responseGemini.content.isNotEmpty(), "Gemini response should not be empty")
+        println("OpenAI Response: ${responseOpenAI.content}")
+        println("Anthropic Response: ${responseAnthropic.content}")
+        println("Gemini Response: ${responseGemini.content}")
+    }
 }

integration-tests/src/jvmTest/kotlin/ai/koog/integration/tests/MultipleSystemMessagesPromptIntegrationTest.kt

Lines changed: 0 additions & 69 deletions
This file was deleted.

prompt/prompt-executor/prompt-executor-clients/prompt-executor-bedrock-client/src/jvmMain/kotlin/ai/koog/prompt/executor/clients/bedrock/BedrockLLMClient.kt

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ import kotlinx.serialization.json.Json
  * @property moderationGuardrailsSettings Optional settings of the AWS bedrock Guardrails (see [AWS documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-use-independent-api.html) ) that would be used for the [LLMClient.moderate] request
  */
 public class BedrockClientSettings(
-    internal val region: String = "us-east-1",
+    internal val region: String = "us-west-2",
     internal val timeoutConfig: ConnectionTimeoutConfig = ConnectionTimeoutConfig(),
     internal val endpointUrl: String? = null,
    internal val maxRetries: Int = 3,
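
Note on the region default: BedrockClientSettings now falls back to us-west-2 when no region is supplied. Downstream code that relied on the previous us-east-1 default can pin the region explicitly instead; a minimal caller-side sketch, using only the constructor parameters visible in the hunk above (any other wiring around the settings object is assumed, not part of this commit):

    // Hypothetical usage sketch: pass the region explicitly rather than relying on
    // the library default, which this commit changes from "us-east-1" to "us-west-2".
    val settings = BedrockClientSettings(
        region = "us-east-1",  // keep the old default only if your Bedrock resources live there
        maxRetries = 3
    )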

prompt/prompt-executor/prompt-executor-clients/prompt-executor-bedrock-client/src/jvmMain/kotlin/ai/koog/prompt/executor/clients/bedrock/BedrockModels.kt

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ public object BedrockModels : LLModelDefinitions {
      */
     public val AnthropicClaude35SonnetV2: LLModel = AnthropicModels.Sonnet_3_5.copy(
         provider = LLMProvider.Bedrock,
-        id = "us.anthropic.claude-3-sonnet-20241022-v2:0",
+        id = "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
     )
 
     /**
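
Note on the model id: the old value mixed the "claude-3-sonnet" family name with the Claude 3.5 Sonnet v2 date/version suffix, so it would not resolve to the intended model; the corrected id targets Claude 3.5 Sonnet v2 via the cross-region ("us.") inference profile. A small sanity-check sketch, using only names visible in this hunk (an assumed test-style snippet, not part of the commit):

    // Assumed verification sketch: the Bedrock alias should expose the corrected id.
    val model = BedrockModels.AnthropicClaude35SonnetV2
    check(model.provider == LLMProvider.Bedrock)
    check(model.id == "us.anthropic.claude-3-5-sonnet-20241022-v2:0")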

prompt/prompt-executor/prompt-executor-clients/prompt-executor-bedrock-client/src/jvmTest/kotlin/ai/koog/prompt/executor/clients/bedrock/BedrockLLMClientTest.kt

Lines changed: 0 additions & 1 deletion
@@ -402,7 +402,6 @@ class BedrockLLMClientTest {
 
     @Test
     fun testToolChoiceConfiguration() {
-
         // Test different tool choice configurations
         val autoPrompt = Prompt.build("test", params = LLMParams(toolChoice = LLMParams.ToolChoice.Auto)) {
             user("Search for something")
