code
stringlengths
419
138k
apis
sequencelengths
1
8
extract_api
stringlengths
67
7.3k
package dev.langchain4j.model.openai; import dev.langchain4j.agent.tool.ToolExecutionRequest; import dev.langchain4j.agent.tool.ToolSpecification; import dev.langchain4j.data.message.*; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.output.TokenUsage; import org.junit.jupiter.api.Test; import java.util.Base64; import java.util.List; import static dev.langchain4j.agent.tool.JsonSchemaProperty.INTEGER; import static dev.langchain4j.data.message.ToolExecutionResultMessage.from; import static dev.langchain4j.data.message.UserMessage.userMessage; import static dev.langchain4j.internal.Utils.readBytes; import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_3_5_TURBO; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO_1106; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_4_VISION_PREVIEW; import static dev.langchain4j.model.output.FinishReason.*; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; class OpenAiChatModelIT { static final String CAT_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/e/e9/Felis_silvestris_silvestris_small_gradual_decrease_of_quality.png"; static final String DICE_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png"; ToolSpecification calculator = ToolSpecification.builder() .name("calculator") .description("returns a sum of two numbers") .addParameter("first", INTEGER) .addParameter("second", INTEGER) .build(); ChatLanguageModel model = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .temperature(0.0) .logRequests(true) .logResponses(true) .build(); ChatLanguageModel visionModel = OpenAiChatModel.builder() 
.baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .modelName(GPT_4_VISION_PREVIEW) .temperature(0.0) .logRequests(true) .logResponses(true) .build(); @Test void should_generate_answer_and_return_token_usage_and_finish_reason_stop() { // given UserMessage userMessage = userMessage("What is the capital of Germany?"); // when Response<AiMessage> response = model.generate(userMessage); // then assertThat(response.content().text()).contains("Berlin"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(14); assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(STOP); } @Test void should_generate_answer_and_return_token_usage_and_finish_reason_length() { // given ChatLanguageModel model = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .maxTokens(3) .build(); UserMessage userMessage = userMessage("What is the capital of Germany?"); // when Response<AiMessage> response = model.generate(userMessage); // then assertThat(response.content().text()).isNotBlank(); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(14); assertThat(tokenUsage.outputTokenCount()).isEqualTo(3); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(LENGTH); } @Test void should_execute_a_tool_then_answer() { // given UserMessage userMessage = userMessage("2+2=?"); List<ToolSpecification> toolSpecifications = singletonList(calculator); // when Response<AiMessage> response = model.generate(singletonList(userMessage), 
toolSpecifications); // then AiMessage aiMessage = response.content(); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(1); ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0); assertThat(toolExecutionRequest.id()).isNotBlank(); assertThat(toolExecutionRequest.name()).isEqualTo("calculator"); assertThat(toolExecutionRequest.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(52); assertThat(tokenUsage.outputTokenCount()).isEqualTo(18); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(TOOL_EXECUTION); // given ToolExecutionResultMessage toolExecutionResultMessage = from(toolExecutionRequest, "4"); List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage); // when Response<AiMessage> secondResponse = model.generate(messages); // then AiMessage secondAiMessage = secondResponse.content(); assertThat(secondAiMessage.text()).contains("4"); assertThat(secondAiMessage.toolExecutionRequests()).isNull(); TokenUsage secondTokenUsage = secondResponse.tokenUsage(); assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(37); assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0); assertThat(secondTokenUsage.totalTokenCount()) .isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount()); assertThat(secondResponse.finishReason()).isEqualTo(STOP); } @Test void should_execute_tool_forcefully_then_answer() { // given UserMessage userMessage = userMessage("2+2=?"); // when Response<AiMessage> response = model.generate(singletonList(userMessage), calculator); // then AiMessage aiMessage = response.content(); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(1); 
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0); assertThat(toolExecutionRequest.id()).isNotBlank(); assertThat(toolExecutionRequest.name()).isEqualTo("calculator"); assertThat(toolExecutionRequest.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(61); assertThat(tokenUsage.outputTokenCount()).isEqualTo(9); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(STOP); // not sure if a bug in OpenAI or stop is expected here // given ToolExecutionResultMessage toolExecutionResultMessage = from(toolExecutionRequest, "4"); List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage); // when Response<AiMessage> secondResponse = model.generate(messages); // then AiMessage secondAiMessage = secondResponse.content(); assertThat(secondAiMessage.text()).contains("4"); assertThat(secondAiMessage.toolExecutionRequests()).isNull(); TokenUsage secondTokenUsage = secondResponse.tokenUsage(); assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(37); assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0); assertThat(secondTokenUsage.totalTokenCount()) .isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount()); assertThat(secondResponse.finishReason()).isEqualTo(STOP); } @Test void should_execute_multiple_tools_in_parallel_then_answer() { // given ChatLanguageModel model = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .modelName(GPT_3_5_TURBO_1106) // supports parallel function calling .temperature(0.0) .build(); UserMessage userMessage = userMessage("2+2=? 
3+3=?"); List<ToolSpecification> toolSpecifications = singletonList(calculator); // when Response<AiMessage> response = model.generate(singletonList(userMessage), toolSpecifications); // then AiMessage aiMessage = response.content(); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(2); ToolExecutionRequest toolExecutionRequest1 = aiMessage.toolExecutionRequests().get(0); assertThat(toolExecutionRequest1.name()).isEqualTo("calculator"); assertThat(toolExecutionRequest1.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}"); ToolExecutionRequest toolExecutionRequest2 = aiMessage.toolExecutionRequests().get(1); assertThat(toolExecutionRequest2.name()).isEqualTo("calculator"); assertThat(toolExecutionRequest2.arguments()).isEqualToIgnoringWhitespace("{\"first\": 3, \"second\": 3}"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(57); assertThat(tokenUsage.outputTokenCount()).isEqualTo(51); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(TOOL_EXECUTION); // given ToolExecutionResultMessage toolExecutionResultMessage1 = from(toolExecutionRequest1, "4"); ToolExecutionResultMessage toolExecutionResultMessage2 = from(toolExecutionRequest2, "6"); List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage1, toolExecutionResultMessage2); // when Response<AiMessage> secondResponse = model.generate(messages); // then AiMessage secondAiMessage = secondResponse.content(); assertThat(secondAiMessage.text()).contains("4", "6"); assertThat(secondAiMessage.toolExecutionRequests()).isNull(); TokenUsage secondTokenUsage = secondResponse.tokenUsage(); assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(83); assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0); assertThat(secondTokenUsage.totalTokenCount()) 
.isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount()); assertThat(secondResponse.finishReason()).isEqualTo(STOP); } @Test void should_generate_valid_json() { //given String userMessage = "Return JSON with two fields: name and surname of Klaus Heisler. " + "Before returning, tell me a joke."; // nudging it to say something additionally to json String expectedJson = "{\"name\": \"Klaus\", \"surname\": \"Heisler\"}"; assertThat(model.generate(userMessage)).isNotEqualToIgnoringWhitespace(expectedJson); ChatLanguageModel modelGeneratingJson = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .modelName(GPT_3_5_TURBO_1106) // supports response_format = 'json_object' .responseFormat("json_object") .logRequests(true) .logResponses(true) .build(); // when String json = modelGeneratingJson.generate(userMessage); // then assertThat(json).isEqualToIgnoringWhitespace(expectedJson); } @Test void should_accept_image_url() { // given ImageContent imageContent = ImageContent.from(CAT_IMAGE_URL); UserMessage userMessage = UserMessage.from(imageContent); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(92); } @Test void should_accept_base64_image() { // given String base64Data = Base64.getEncoder().encodeToString(readBytes(CAT_IMAGE_URL)); ImageContent imageContent = ImageContent.from(base64Data, "image/png"); UserMessage userMessage = UserMessage.from(imageContent); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(92); } @Test void should_accept_text_and_image() { // given UserMessage userMessage = UserMessage.from( 
TextContent.from("What do you see? Reply in one word."), ImageContent.from(CAT_IMAGE_URL) ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(102); } @Test void should_accept_text_and_multiple_images() { // given UserMessage userMessage = UserMessage.from( TextContent.from("What do you see? Reply with one word per image."), ImageContent.from(CAT_IMAGE_URL), ImageContent.from(DICE_IMAGE_URL) ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dice"); assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(189); } @Test void should_accept_text_and_multiple_images_from_different_sources() { // given UserMessage userMessage = UserMessage.from( ImageContent.from(CAT_IMAGE_URL), ImageContent.from(Base64.getEncoder().encodeToString(readBytes(DICE_IMAGE_URL)), "image/png"), TextContent.from("What do you see? Reply with one word per image.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dice"); assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(189); } @Test void should_use_enum_as_model_name() { // given OpenAiChatModel model = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .modelName(GPT_3_5_TURBO) .logRequests(true) .logResponses(true) .build(); // when String response = model.generate("What is the capital of Germany?"); // then assertThat(response).containsIgnoringCase("Berlin"); } }
[ "dev.langchain4j.agent.tool.ToolSpecification.builder" ]
[((1491, 1717), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1696), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1651), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1607), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1550), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((13320, 13380), 'java.util.Base64.getEncoder'), ((15213, 15274), 'java.util.Base64.getEncoder')]
import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static java.util.stream.Collectors.joining; public class _12_ChatWithDocumentsExamples { static class IfYouNeedSimplicity { public static void main(String[] args) throws Exception { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(300, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document document = loadDocument(toPath("story-about-happy-carrot.txt"), new 
TextDocumentParser()); ingestor.ingest(document); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) // .chatMemory() // you can override default chat memory // .promptTemplate() // you can override default prompt template .build(); String answer = chain.execute("Who is Charlie?"); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } static class If_You_Need_More_Control { public static void main(String[] args) { // Load the document that includes the information you'd like to "chat" about with the model. Document document = loadDocument(toPath("story-about-happy-carrot.txt"), new TextDocumentParser()); // Split document into segments 100 tokens each DocumentSplitter splitter = DocumentSplitters.recursive( 100, 0, new OpenAiTokenizer("gpt-3.5-turbo") ); List<TextSegment> segments = splitter.split(document); // Embed segments (convert them into vectors that represent the meaning) using embedding model EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments).content(); // Store embeddings into embedding store for further search / retrieval EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); // Specify the question you want to ask the model String question = "Who is Charlie?"; // Embed the question Embedding questionEmbedding = embeddingModel.embed(question).content(); // Find relevant embeddings in embedding store by semantic similarity // You can play with parameters below to find a sweet spot for your specific use case int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); // Create a prompt for the model that 
includes question and relevant embeddings PromptTemplate promptTemplate = PromptTemplate.from( "Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded().text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); // Send the prompt to the OpenAI chat model ChatLanguageModel chatModel = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content(); // See an answer from the model String answer = aiMessage.text(); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } private static Path toPath(String fileName) { try { URL fileUrl = _12_ChatWithDocumentsExamples.class.getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1810, 2050), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 2021), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 1969), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 1917), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2254, 2667), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2254, 2476), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2254, 2383), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
///usr/bin/env jbang "$0" "$@" ; exit $? //DEPS dev.langchain4j:langchain4j:0.25.0 //DEPS dev.langchain4j:langchain4j-ollama:0.25.0 //DEPS org.slf4j:slf4j-jdk14:2.0.10 import static java.lang.System.out; import java.util.concurrent.CountDownLatch; import dev.langchain4j.model.StreamingResponseHandler; import dev.langchain4j.model.language.StreamingLanguageModel; import dev.langchain4j.model.ollama.OllamaStreamingLanguageModel; import dev.langchain4j.model.output.Response; public class LangchainOllama { public static void main(String[] args) throws InterruptedException { StreamingLanguageModel model = OllamaStreamingLanguageModel.builder() .baseUrl("http://localhost:11434") .modelName("mistral") .temperature(0.0) .build(); String review = "What is the captial of Germany?"; out.print("Answer: "); CountDownLatch latch = new CountDownLatch(1); model.generate(review, new StreamingResponseHandler<String>() { @Override public void onNext(String token) { System.out.print(token); } @Override public void onComplete(Response<String> response) { latch.countDown(); } @Override public void onError(Throwable error) { latch.countDown(); } }); latch.await(); System.exit(0); } }
[ "dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder" ]
[((627, 797), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 776), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 746), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 712), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder')]
package com.mindolph.base.genai.llm; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiChatModel.OpenAiChatModelBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.net.InetSocketAddress; import java.net.Proxy; import java.time.Duration; /** * @author [email protected] * @since 1.7 */ public class OpenAiProvider extends BaseLlmProvider { private static final Logger log = LoggerFactory.getLogger(OpenAiProvider.class); public OpenAiProvider(String apiKey, String aiModel) { super(apiKey, aiModel); } @Override protected ChatLanguageModel buildAI(float temperature) { log.info("Build OpenAI with model %s and access %s".formatted(this.aiModel, super.proxyEnabled ? "with %s proxy %s".formatted(Proxy.Type.valueOf(super.proxyType), this.proxyUrl) : "without proxy")); OpenAiChatModelBuilder builder = OpenAiChatModel.builder() .apiKey(this.apiKey) .modelName(this.aiModel) .maxRetries(1) .timeout(Duration.ofSeconds(timeout)) .temperature((double) temperature); if (super.proxyEnabled) { Proxy.Type proxyType = Proxy.Type.valueOf(super.proxyType); builder.proxy(new Proxy(proxyType, new InetSocketAddress(this.proxyHost, this.proxyPort))); } OpenAiChatModel model = builder.build(); return model; } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((884, 919), 'java.net.Proxy.Type.valueOf'), ((998, 1237), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1186), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1132), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1101), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1060), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1308, 1343), 'java.net.Proxy.Type.valueOf')]
package me.nzuguem.bot.configurations.llm; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.content.Content; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.rag.query.Query; import io.quarkiverse.langchain4j.pgvector.PgVectorEmbeddingStore; import jakarta.enterprise.context.ApplicationScoped; import me.nzuguem.bot.exceptions.NotFoundRelevantException; import java.util.List; @ApplicationScoped public class GithubAppContentRetriever implements ContentRetriever { private final EmbeddingStoreContentRetriever retriever; public GithubAppContentRetriever(PgVectorEmbeddingStore embeddingStore, EmbeddingModel embeddingModel) { this.retriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .maxResults(20) .minScore(0.9) .build(); } @Override public List<Content> retrieve(Query query) { var result = this.retriever.retrieve(query); if (result.isEmpty()) { throw new NotFoundRelevantException("No information relevant to the configured score was found"); } return result; } }
[ "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((812, 1036), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 1011), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 980), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 948), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 900), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')]
/**************************************************************************************** Copyright © 2003-2012 hbasesoft Corporation. All rights reserved. Reproduction or <br> transmission in whole or in part, in any form or by any means, electronic, mechanical <br> or otherwise, is prohibited without the prior written consent of the copyright owner. <br> ****************************************************************************************/ package com.hbasesoft.framework.langchain4j.demo; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import com.hbasesoft.framework.common.utils.PropertyHolder; import com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel; import com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; /** * <Description> <br> * * @author 王伟<br> * @version 1.0<br> * @taskId <br> * @CreateDate 2023年10月26日 <br> * @since V1.0<br> * @see com.hbasesoft.framework.langchain4j.demo <br> */ public class ChatWithDocumentsExamples { /** * <Description> <br> * * @author 王伟<br> * @version 1.0<br> * @taskId <br> * @CreateDate 2023年10月26日 <br> * @since V1.0<br> * @see com.hbasesoft.framework.langchain4j.demo <br> */ public static class IfYouNeedSimplicity { /** NUM_500 */ private static final int NUM_500 = 500; /** * Description: <br> * * @author 王伟<br> * @taskId <br> * @param args * 
@throws Exception <br> */ public static void main(final String[] args) throws Exception { EmbeddingModel embeddingModel = QwenEmbeddingModel.builder() .apiKey(PropertyHolder.getProperty("qwen.apikey")).build(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(NUM_500, 0)).embeddingModel(embeddingModel) .embeddingStore(embeddingStore).build(); Document document = loadDocument(toPath("example-files/前端项目配置启动.docx")); ingestor.ingest(document); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(QwenChatModel.builder().apiKey(PropertyHolder.getProperty("qwen.apikey")).build()) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) // .chatMemory() // you can override default chat memory // .promptTemplate() // you can override default prompt template .build(); String answer = chain.execute("npm 私服的地址是什么?"); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } private static Path toPath(final String fileName) { try { URL fileUrl = Thread.currentThread().getContextClassLoader().getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((2335, 2438), 'com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel.builder'), ((2335, 2430), 'com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel.builder'), ((2577, 2771), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2763), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2715), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2684), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2964, 3388), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2964, 3209), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2964, 3120), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3038, 3119), 'com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel.builder'), ((3038, 3111), 'com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel.builder')]
package my.samples; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.loader.FileSystemDocumentLoader; import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; public class LoadFord150ManualToES { public static final String ANSI_GREEN = "\u001B[32m"; public static final String ANSI_RESET = "\u001B[0m"; public static final String ANSI_YELLOW = "\u001B[33m"; public static void main(String[] args) { EmbeddingStore<TextSegment> embeddingStore = ElasticsearchEmbeddingStore.builder() .serverUrl("http://localhost:9200") .indexName("car-warranty-guide-embeddings") .dimension(384) .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(300, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Path filePath = toPath("example-files/2025_US_F150_Warranty_Guide_ENG_V1.pdf"); Document document = FileSystemDocumentLoader.loadDocument(filePath, new ApachePdfBoxDocumentParser()); document.metadata().add("fileName", filePath.getFileName().toString()); document.metadata().add("filePath", filePath.toString()); document.metadata().add("company", "FORD"); document.metadata().add("product", "F150"); document.metadata().add("language", "ENG"); document.metadata().add("version", "V1"); document.metadata().add("year", "2025"); 
document.metadata().add("type", "Warranty Guide"); document.metadata().add("country", "US"); document.metadata().add("category", "Automotive"); ingestor.ingest(document); System.out.println(ANSI_GREEN + "Document ingested successfully" + ANSI_RESET); } private static Path toPath(String fileName) { try { // Corrected path assuming files are in src/main/resources/example-files URL fileUrl = LoadFord150ManualToES.class.getClassLoader().getResource( fileName); if (fileUrl == null) { throw new RuntimeException("Resource not found: " + fileName); } return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException("Failed to resolve URI for: " + fileName, e); } } }
[ "dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1086, 1292), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1267), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1235), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1175), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1413, 1637), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1612), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1564), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1516), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
package ai.equity.salt.openai.model; import ai.equity.salt.openai.OpenAiProperties; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiLanguageModel; import lombok.RequiredArgsConstructor; import org.springframework.stereotype.Component; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO_1106; @Component @RequiredArgsConstructor public class OpenAiModelFactory { private final OpenAiProperties properties; public OpenAiLanguageModel create() { return OpenAiLanguageModel.builder() .apiKey(properties.key()) .logRequests(true) .logResponses(true) .build(); } public OpenAiChatModel createDefaultChatModel() { return OpenAiChatModel.builder() .modelName(GPT_3_5_TURBO_1106) .apiKey(properties.key()) .maxTokens(1024) .temperature(0.0) .topP(0.0) .logRequests(true) .logResponses(true) .build(); } }
[ "dev.langchain4j.model.openai.OpenAiLanguageModel.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((540, 707), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 682), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 646), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 611), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((784, 1088), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 1063), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 1027), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 992), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 965), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 931), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 898), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 856), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package edu.whut.cs.esd.llm.smarthome; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.retriever.Retriever; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.springframework.boot.ApplicationRunner; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.Bean; import org.springframework.core.io.Resource; import org.springframework.core.io.ResourceLoader; import java.io.IOException; import java.util.Scanner; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; @SpringBootApplication public class SmartHomeApplication { /** * Run SmartHomeApplicationTest to see simulated conversation with customer support agent */ @Bean ApplicationRunner interactiveChatRunner(SmartHomeAgent agent) { return args -> { Scanner scanner = new Scanner(System.in); while (true) { System.out.print("User: "); String userMessage = scanner.nextLine(); if ("exit".equalsIgnoreCase(userMessage)) { break; } String agentMessage = agent.chat(userMessage); System.out.println("Agent: " + agentMessage); } 
scanner.close(); }; } @Bean SmartHomeAgent customerSupportAgent(ChatLanguageModel chatLanguageModel, EnvironmentalTools environmentalTools, Retriever<TextSegment> retriever) { return AiServices.builder(SmartHomeAgent.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .tools(environmentalTools) .retriever(retriever) .build(); } @Bean Retriever<TextSegment> retriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) { // You will need to adjust these parameters to find the optimal setting, which will depend on two main factors: // - The nature of your data // - The embedding model you are using // int maxResultsRetrieved = 1; int maxResultsRetrieved = 3; double minScore = 0.6; return EmbeddingStoreRetriever.from(embeddingStore, embeddingModel, maxResultsRetrieved, minScore); } @Bean EmbeddingModel embeddingModel() { return new AllMiniLmL6V2EmbeddingModel(); } @Bean EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException { // Normally, you would already have your embedding store filled with your data. // However, for the purpose of this demonstration, we will: // 1. Create an in-memory embedding store EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); // 2. Load an example document ("Miles of Smiles" terms of use) Resource resource = resourceLoader.getResource("classpath:smart-home-product-manual.txt"); Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser()); // 3. Split the document into segments 100 tokens each // 4. Convert segments into embeddings // 5. 
Store embeddings into embedding store // All this can be done manually, but we will use EmbeddingStoreIngestor to automate this: // DocumentSplitter documentSplitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); DocumentSplitter documentSplitter = DocumentSplitters.recursive(300, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(documentSplitter) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); ingestor.ingest(document); return embeddingStore; } public static void main(String[] args) { SpringApplication.run(SmartHomeApplication.class, args); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((2504, 2777), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2752), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2714), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2671), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2598), 'dev.langchain4j.service.AiServices.builder'), ((4685, 4890), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4865), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4817), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4769), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
package com.example.demo; import java.time.Duration; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; public class _01_ModelParameters { public static void main(String [] args) { Duration duration = Duration.ofSeconds(60); ChatLanguageModel model = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .temperature(0.3) .timeout(duration) .logRequests(true) .logResponses(true) .build(); String prompt = "Explain in three lines how to make a code cleaner"; String response = model.generate(prompt); System.out.println(response); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((320, 486), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 473), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 449), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 426), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 403), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 381), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.sam.apihelpfulprofessor.service.Langchain; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.sam.apihelpfulprofessor.mapper.TopicMapper; import com.sam.apihelpfulprofessor.model.Topic; import com.sam.apihelpfulprofessor.repository.TopicRepository; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.internal.Json; import dev.langchain4j.model.inprocess.InProcessEmbeddingModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import jakarta.annotation.PostConstruct; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.core.io.ClassPathResource; import org.springframework.stereotype.Service; import static dev.langchain4j.model.inprocess.InProcessEmbeddingModelType.ALL_MINILM_L6_V2; import java.io.File; import java.io.IOException; import java.util.List; @Service public class LangChainService { private String OPENAI_TOKEN; public static OpenAiChatModel CHAT_MODEL; public static InProcessEmbeddingModel EMBEDDING_MODEL = new InProcessEmbeddingModel(ALL_MINILM_L6_V2); public static ConversationalRetrievalChain CHAIN; EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor; TopicMapper topicMapper = TopicMapper.INSTANCE; private final TopicRepository topicRepository; @Autowired public LangChainService(@Value("${openaitoken}") String token, TopicRepository topicRepository) { this.OPENAI_TOKEN = token; CHAT_MODEL = OpenAiChatModel.withApiKey(OPENAI_TOKEN); this.topicRepository = 
topicRepository; } // @PostConstruct public void buildEmbeddingStoreFromFile() throws IOException { List<Topic> topics = loadTopicsFromFile("static/socio-data.json"); buildEmbeddingStore(topics); } @PostConstruct public void buildEmbeddingStoreFromDb() throws IOException { List<Topic> topics = topicRepository.findAll(); buildEmbeddingStore(topics); } public void buildEmbeddingStore(List<Topic> topics) { ingestor = EmbeddingStoreIngestor.builder() .splitter(new CustomSentenceSplitter()) .embeddingModel(EMBEDDING_MODEL) .embeddingStore(embeddingStore) .build(); topics.forEach(topic -> { ingestor.ingest(Document.from(Json.toJson(topicMapper.toDto(topic)))); }); // Build LLM chain with a knowledge base of all the data from the file CHAIN = ConversationalRetrievalChain.builder() .chatLanguageModel(CHAT_MODEL) .retriever(EmbeddingStoreRetriever.from(embeddingStore, EMBEDDING_MODEL)) .build(); } private List<Topic> loadTopicsFromFile(String filename) throws IOException { // Load file and embed each object File file = new ClassPathResource(filename).getFile(); ObjectMapper mapper = new ObjectMapper(); return mapper.readValue(file, new TypeReference<List<Topic>>() {}); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((2595, 2805), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2780), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2732), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2683), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3032, 3232), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3032, 3207), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3032, 3117), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package com.pengyu.magnet.langchain4j; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.service.AiServices; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration public class Langchain4JConfig { @Bean AssessmentAgent assessmentAgent(ChatLanguageModel chatLanguageModel) { return AiServices.builder(AssessmentAgent.class) .chatLanguageModel(chatLanguageModel) // .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .build(); } @Bean MatchAgent matchAgent(ChatLanguageModel chatLanguageModel) { return AiServices.builder(MatchAgent.class) .chatLanguageModel(chatLanguageModel) // .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .build(); } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((458, 653), 'dev.langchain4j.service.AiServices.builder'), ((458, 553), 'dev.langchain4j.service.AiServices.builder'), ((752, 942), 'dev.langchain4j.service.AiServices.builder'), ((752, 842), 'dev.langchain4j.service.AiServices.builder')]
package it.croway; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.io.Resource; import org.springframework.core.io.ResourceLoader; import java.io.IOException; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; @Configuration public class ConversationalAIConfiguration { @Bean ConversationalAIAgent customerSupportAgent(ChatLanguageModel chatLanguageModel, ContentRetriever contentRetriever) { return AiServices.builder(ConversationalAIAgent.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .contentRetriever(contentRetriever) .build(); } @Bean ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) { // You will need to adjust these parameters to find the optimal setting, which will depend on two main factors: // - The nature of your 
data // - The embedding model you are using int maxResults = 1; double minScore = 0.6; return EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .maxResults(maxResults) .minScore(minScore) .build(); } @Bean EmbeddingModel embeddingModel() { return new AllMiniLmL6V2EmbeddingModel(); } @Bean EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException { // Normally, you would already have your embedding store filled with your data. // However, for the purpose of this demonstration, we will: // 1. Create an in-memory embedding store EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); // camel-contributing is copied and pasted from https://camel.apache.org/camel-core/contributing/ Resource resource = resourceLoader.getResource("classpath:camel-contributing.txt"); Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser()); // 3. Split the document into segments 100 tokens each // 4. Convert segments into embeddings // 5. Store embeddings into embedding store // All this can be done manually, but we will use EmbeddingStoreIngestor to automate this: DocumentSplitter documentSplitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(documentSplitter) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); ingestor.ingest(document); return embeddingStore; } }
[ "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((1575, 1778), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1765), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1725), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1664), 'dev.langchain4j.service.AiServices.builder'), ((2147, 2324), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2311), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2287), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2259), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2223), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3486, 3643), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3630), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3594), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3558), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.output.Response; import dev.langchain4j.service.AiServices; public class _04_Agents { static class MyAgent { @Tool("...") void printAgent() { System.out.println("Personal secret agent activated!"); } } interface Assistant { Response<AiMessage> chat(String userMessage); } public static void main(String[] args) { String openAiKey = System.getenv("OPENAI_API_KEY"); var assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(openAiKey)) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .tools(new MyAgent()) .build(); var question = "What is the sum of the numbers of letters in the words 'language' and 'model'"; var response = assistant.chat(question); System.out.println(response.content().text()); System.out.println("\n\n########### TOKEN USAGE ############\n"); System.out.println(response.tokenUsage()); } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((696, 941), 'dev.langchain4j.service.AiServices.builder'), ((696, 916), 'dev.langchain4j.service.AiServices.builder'), ((696, 878), 'dev.langchain4j.service.AiServices.builder'), ((696, 805), 'dev.langchain4j.service.AiServices.builder')]
package com.jibanez.clgeneratoraiservice; import com.jibanez.clgeneratoraiservice.service.CoverLetterAiService; import com.jibanez.clgeneratoraiservice.service.JobDetailsExtractorAiService; import com.jibanez.clgeneratoraiservice.util.AiDemoService; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.Bean; import org.springframework.core.io.Resource; import org.springframework.core.io.ResourceLoader; import java.io.IOException; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; @SpringBootApplication public class ClGeneratorAiServiceApplication { public static void main(String[] args) { SpringApplication.run(ClGeneratorAiServiceApplication.class, args); } @Bean AiDemoService aiDemoService(ChatLanguageModel chatLanguageModel, // BookingTools bookingTools, ContentRetriever contentRetriever) { return AiServices.builder(AiDemoService.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) // 
.tools(bookingTools) .contentRetriever(contentRetriever) .build(); } @Bean CoverLetterAiService coverLetterAiService(ChatLanguageModel chatLanguageModel, // BookingTools bookingTools, ContentRetriever contentRetriever) { return AiServices.builder(CoverLetterAiService.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) // .tools(bookingTools) .contentRetriever(contentRetriever) .build(); } @Bean JobDetailsExtractorAiService jobDetailsExtractorAiService(ChatLanguageModel chatLanguageModel) { return AiServices.builder(JobDetailsExtractorAiService.class) .chatLanguageModel(chatLanguageModel) .build(); } @Bean ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) { // You will need to adjust these parameters to find the optimal setting, which will depend on two main factors: // - The nature of your data // - The embedding model you are using int maxResults = 10; double minScore = 0.6; return EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .maxResults(maxResults) .minScore(minScore) .build(); } @Bean EmbeddingModel embeddingModel() { return new AllMiniLmL6V2EmbeddingModel(); } @Bean EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException { // Normally, you would already have your embedding store filled with your data. // However, for the purpose of this demonstration, we will: // 1. Create an in-memory embedding store EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); // 2. Load a document with user data Resource resource = resourceLoader.getResource("classpath:user-data.txt"); Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser()); // 3. Split the document into segments 300 tokens each // 4. Convert segments into embeddings // 5. 
Store embeddings into embedding store // All this can be done manually, but we will use EmbeddingStoreIngestor to automate this: DocumentSplitter documentSplitter = DocumentSplitters.recursive(300, 0); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(documentSplitter) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); ingestor.ingest(document); return embeddingStore; } }
[ "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((1966, 2248), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2223), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2132), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2059), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2812), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2787), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2696), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2623), 'dev.langchain4j.service.AiServices.builder'), ((2947, 3080), 'dev.langchain4j.service.AiServices.builder'), ((2947, 3055), 'dev.langchain4j.service.AiServices.builder'), ((3495, 3732), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3707), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3671), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3631), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3583), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((4891, 5096), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 5071), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 5023), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 4975), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
/* * Copyright 2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package services.ai; import com.google.cloud.vertexai.VertexAI; import com.google.cloud.vertexai.api.Content; import com.google.cloud.vertexai.generativeai.GenerativeModel; import com.google.cloud.vertexai.api.GenerateContentResponse; import com.google.cloud.vertexai.api.GenerationConfig; import com.google.cloud.vertexai.api.SafetySetting; import com.google.cloud.vertexai.api.HarmCategory; import com.google.cloud.vertexai.api.Part; import com.google.cloud.vertexai.api.Blob; import com.google.protobuf.ByteString; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.vertexai.VertexAiChatModel; import dev.langchain4j.model.vertexai.VertexAiLanguageModel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.stereotype.Service; import services.config.CloudConfig; import services.utility.CloudUtility; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @Service public class VertexAIClient { private static final Logger logger = LoggerFactory.getLogger(VertexAIClient.class); public GenerateContentResponse promptOnImage(byte[] image) throws IOException { return promptOnImage(image, ""); } public GenerateContentResponse promptOnImage(byte[] image, String prompt) throws IOException { GenerateContentResponse 
response = null; if(prompt== null ||prompt.isBlank()) prompt = "Extract the book name, labels, main color, and author strictly in JSON format. The json output strictly have property names bookName, mainColor, author, and labels."; String location = CloudUtility.extractRegion(CloudConfig.zone); try (VertexAI vertexAI = new VertexAI(CloudConfig.projectID, location)) { GenerationConfig generationConfig = GenerationConfig.newBuilder() .setMaxOutputTokens(2048) .setTemperature(0.4F) .setTopK(32) .setTopP(1F) .build(); GenerativeModel model = new GenerativeModel(VertexModels.GEMINI_PRO_VISION_VERSION, generationConfig, vertexAI); List<SafetySetting> safetySettings = Arrays.asList( SafetySetting.newBuilder() .setCategory(HarmCategory.HARM_CATEGORY_HATE_SPEECH) .setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE) .build(), SafetySetting.newBuilder() .setCategory(HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT) .setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE) .build(), SafetySetting.newBuilder() .setCategory(HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT) .setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE) .build(), SafetySetting.newBuilder() .setCategory(HarmCategory.HARM_CATEGORY_HARASSMENT) .setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE) .build() ); List<Content> contents = new ArrayList<>(); contents.add(Content.newBuilder().setRole("user").addParts(Part.newBuilder().setInlineData(Blob.newBuilder().setMimeType("image/png") .setData(ByteString.copyFrom(image)))) .addParts(Part.newBuilder().setText(prompt)) .build()); // ResponseStream<GenerateContentResponse> responseStream = model.generateContentStream(contents, safetySettings); response = model.generateContent(contents, safetySettings); logger.info(response.toString()); } return response; } public String promptModel(String prompt, String modelName) { String output =null; logger.info("The prompt & model name are: " + prompt.substring(0,100) +" 
| "+modelName); if (modelName.contains("chat")) { VertexAiChatModel vertexAiChatModel = VertexAiChatModel.builder() .endpoint("us-central1-aiplatform.googleapis.com:443") .project(CloudConfig.projectID) .location(CloudConfig.zone) .publisher("google") .modelName(modelName) .temperature(0.1) .maxOutputTokens(1000) .topK(0) .topP(0.0) .maxRetries(3) .build(); Response<AiMessage> modelResponse = vertexAiChatModel.generate(UserMessage.from(prompt)); output = modelResponse.content().text(); } else { VertexAiLanguageModel vertexAiTextModel = VertexAiLanguageModel.builder() .endpoint("us-central1-aiplatform.googleapis.com:443") .project(CloudConfig.projectID) .location(CloudConfig.zone) .publisher("google") .modelName(modelName) .temperature(0.1) .maxOutputTokens(1000) .topK(0) .topP(0.0) .maxRetries(3) .build(); Response<String> textResponse = vertexAiTextModel.generate(prompt); output = textResponse.content(); } logger.info(output); return output; } }
[ "dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder", "dev.langchain4j.model.vertexai.VertexAiChatModel.builder" ]
[((2519, 2771), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2734), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2693), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2652), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2602), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2982, 3225), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((2982, 3188), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((2982, 3089), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3496), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3459), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3360), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3767), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3730), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3631), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 4031), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 3994), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 3895), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((4128, 4409), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4380), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4315), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4164), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4174, 4314), 'com.google.cloud.vertexai.api.Part.newBuilder'), ((4206, 4313), 'com.google.cloud.vertexai.api.Blob.newBuilder'), ((4206, 4248), 'com.google.cloud.vertexai.api.Blob.newBuilder'), ((4346, 4379), 'com.google.cloud.vertexai.api.Part.newBuilder'), ((4981, 5471), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5442), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5407), 
'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5376), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5347), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5304), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5266), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5224), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5183), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5135), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5083), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((5699, 6193), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6164), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6129), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6098), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6069), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6026), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5988), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5946), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5905), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5857), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5805), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder')]
package com.kchandrakant; import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; public class ServiceWithTools { static class Calculator { @Tool("Calculates the length of a string") int stringLength(String s) { return s.length(); } @Tool("Calculates the sum of two numbers") int add(int a, int b) { return a + b; } @Tool("Calculates the square root of a number") double sqrt(int x) { return Math.sqrt(x); } } interface Assistant { String chat(String userMessage); } public static void main(String[] args) { Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .tools(new Calculator()) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .build(); //String question = "What is the square root of the sum of the numbers of letters in the words \"hello\" and \"world\"?"; String question = "What is the sum of the numbers of letters in the words \"language\" and \"model\"?"; String answer = assistant.chat(question); System.out.println(answer); // The square root of the sum of the number of letters in the words "hello" and "world" is approximately 3.162. } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((824, 1085), 'dev.langchain4j.service.AiServices.builder'), ((824, 1060), 'dev.langchain4j.service.AiServices.builder'), ((824, 987), 'dev.langchain4j.service.AiServices.builder'), ((824, 946), 'dev.langchain4j.service.AiServices.builder')]
package bot;

import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.huggingface.HuggingFaceChatModel;
import dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;

import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static java.time.Duration.ofSeconds;

import java.io.File;

/**
 * Retrieval-augmented Q&amp;A bot: ingests a document into an in-memory
 * embedding store and answers a question against it.
 */
public class Bot {

    /**
     * HuggingFace API token, read from the {@code HF_API_KEY} environment
     * variable. SECURITY FIX: the token used to be hard-coded in source,
     * which leaks the credential to anyone with repository access.
     */
    public static final String HF_API_KEY = System.getenv("HF_API_KEY");

    /**
     * Answers {@code pergunta} (the question) using the content of
     * {@code arquivo} (the file to ingest).
     *
     * @param arquivo  path of the document to load and embed
     * @param pergunta question to answer from the document
     * @return the model's answer
     * @throws Exception if loading, embedding or generation fails
     */
    public static String perguntar(String arquivo, String pergunta) throws Exception {
        Document document = loadDocument(toPath(arquivo));

        // Embedding model used both for ingestion and for query-time retrieval.
        EmbeddingModel embeddingModel = HuggingFaceEmbeddingModel.builder()
                .accessToken(HF_API_KEY)
                .modelId("sentence-transformers/all-MiniLM-L6-v2")
                .waitForModel(true)
                .timeout(ofSeconds(90))
                .build();

        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        // Split the document into ~200-token segments and store their embeddings.
        EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                .documentSplitter(DocumentSplitters.recursive(200))
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .build();
        ingestor.ingest(document);

        // BUG FIX: the original builder chain called chatLanguageModel(...) twice
        // (HuggingFaceChatModel, then OpenAiChatModel). Builders keep only the
        // last value, so the HuggingFace chat model was silently dead code.
        // A single, explicit model choice (the one that was actually in effect)
        // is kept here.
        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
                .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
                .build();

        return chain.execute(pergunta);
    }

    /**
     * Converts a file name to a {@link Path} via its {@code file:} URL.
     *
     * @throws MalformedURLException if the file URL cannot be formed
     */
    private static Path toPath(String fileName) throws MalformedURLException {
        try {
            URL fileUrl = new File(fileName).toURI().toURL();
            return Paths.get(fileUrl.toURI());
        } catch (URISyntaxException e) {
            throw new RuntimeException(e);
        }
    }
}
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1383, 1627), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1602), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1562), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1526), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1459), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1790, 2005), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1980), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1932), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1884), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2155, 2617), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2535), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2367), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2278), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package dev.morvan.interceptor;

import static dev.langchain4j.data.document.splitter.DocumentSplitters.recursive;

import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.quarkiverse.langchain4j.redis.RedisEmbeddingStore;
import io.quarkus.runtime.StartupEvent;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import java.io.File;
import java.util.Collections;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.eclipse.microprofile.config.inject.ConfigProperty;

/**
 * Loads prompt documents into the Redis embedding store, both at
 * application startup and on demand for a single file.
 */
@Slf4j
@ApplicationScoped
public class Ingestor {

    @Inject
    RedisEmbeddingStore store;

    @Inject
    EmbeddingModel embeddingModel;

    @ConfigProperty(name = "prompts.init.filepath")
    String originalDirPath;

    /** Ingests every document found in the configured directory when the app starts. */
    public void ingest(@Observes StartupEvent event) {
        List<Document> loaded = FileSystemDocumentLoader.loadDocuments(
                new File(originalDirPath).toPath(),
                new TextDocumentParser());
        ingest(loaded);
    }

    /** Ingests a single file identified by its path. */
    public void ingest(String filePath) {
        Document parsed = FileSystemDocumentLoader.loadDocument(
                new File(filePath).toPath(),
                new TextDocumentParser());
        ingest(Collections.singletonList(parsed));
        log.info("Ingested document: {}", filePath);
    }

    /** Splits documents into 500-character segments (no overlap) and stores their embeddings. */
    private void ingest(List<Document> documents) {
        log.info("Ingesting documents...");
        EmbeddingStoreIngestor storeIngestor = EmbeddingStoreIngestor.builder()
                .documentSplitter(recursive(500, 0))
                .embeddingModel(embeddingModel)
                .embeddingStore(store)
                .build();
        storeIngestor.ingest(documents);
        log.info("Ingested {} documents", documents.size());
    }
}
[ "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1724, 1921), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1896), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1843), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1795), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
package com.egineering.ai.llmjavademo.agents;

import com.egineering.ai.llmjavademo.configurations.LlmConfiguration;
import com.egineering.ai.llmjavademo.dtos.LlmResponse;
import com.egineering.ai.llmjavademo.dtos.MessageForm;
import com.egineering.ai.llmjavademo.dtos.StreamingLlmResponse;
import com.egineering.ai.llmjavademo.models.chromadbapi.Collection;
import com.egineering.ai.llmjavademo.models.chromadbapi.QueryRequest;
import com.egineering.ai.llmjavademo.models.chromadbapi.QueryResponse;
import com.egineering.ai.llmjavademo.services.ChromaClient;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import lombok.SneakyThrows;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.messaging.simp.SimpMessagingTemplate;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * Retrieval-augmented chat agent that answers questions about documents
 * stored in a Chroma collection and streams the model's reply
 * token-by-token over a STOMP topic.
 *
 * NOTE(review): {@code chatMemory} is a single shared instance mutated by
 * {@link #generate(MessageForm)}; presumably one conversation at a time is
 * expected — confirm this service is not used concurrently by multiple users.
 */
@Service
public class DocumentStreamingAgent {

    // System-prompt template loaded from the classpath; applied with the
    // retrieved documents before each generation.
    @Value("classpath:prompts/documentsSystemMessage.st")
    private Resource documentsSystemMessage;

    private final SimpMessagingTemplate messagingTemplate;
    // Local embedding model used to embed the user's question for retrieval.
    private final EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
    // Sliding window of the last 20 chat messages, shared across calls.
    private final ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(20);
    private final StreamingChatLanguageModel streamingChatModel;
    private final ChromaClient chromaClient;
    // Id of the Chroma collection named "documents", resolved once at construction.
    private final String collectionId;

    // NOTE(review): the LlmConfiguration parameter is accepted but unused, and the
    // Chroma endpoint is hard-coded — presumably it should come from the
    // configuration; confirm.
    public DocumentStreamingAgent(LlmConfiguration configuration,
                                  StreamingChatLanguageModel streamingChatModel,
                                  SimpMessagingTemplate messagingTemplate) {
        this.messagingTemplate = messagingTemplate;
        this.streamingChatModel = streamingChatModel;
        this.chromaClient = new ChromaClient("http://localhost:8000", Duration.of(10, ChronoUnit.SECONDS));
        Collection collection = chromaClient.collection("documents");
        this.collectionId = collection.getId();
    }

    /**
     * Answers the user's message using the 10 most similar document segments,
     * streaming each generated token to "/topic/documents/llmStreamingResponse"
     * and blocking until the full reply is available.
     *
     * @param form carries the user's message text
     * @return the updated chat history, the retrieved document texts, and the
     *         set of source file names they came from
     */
    @SneakyThrows
    public StreamingLlmResponse generate(MessageForm form) {

        // Fall back to the raw user message if the prompt template can't be read.
        String prompt;
        try {
            prompt = documentsSystemMessage.getContentAsString(StandardCharsets.UTF_8);
        } catch (IOException ioe) {
            prompt = form.message();
        }

        // Embed the question and fetch the 10 nearest segments from Chroma.
        Embedding questionEmbedding = embeddingModel.embed(form.message()).content();
        QueryRequest queryRequest = new QueryRequest(questionEmbedding.vectorAsList(), 10);
        QueryResponse queryResponse = chromaClient.queryCollection(this.collectionId, queryRequest);

        List<EmbeddingMatch<TextSegment>> matches = toEmbeddingMatches(queryResponse);

        // Concatenate the retrieved segment texts into the system prompt.
        List<String> documents = matches.stream()
                .map(textSegmentEmbeddingMatch -> textSegmentEmbeddingMatch.embedded().text())
                .toList();
        String documentString = String.join("\n", documents);

        SystemMessage systemMessage = PromptTemplate.from(prompt)
                .apply(Map.of("documents", documentString))
                .toSystemMessage();

        chatMemory.add(systemMessage);
        chatMemory.add(UserMessage.from(form.message()));

        // Completed by the handler's onComplete callback once streaming ends.
        CompletableFuture<AiMessage> futureAiMessage = new CompletableFuture<>();

        StreamingResponseHandler<AiMessage> handler = new StreamingResponseHandler<>() {

            // Forward each token to the websocket topic as it arrives.
            @Override
            public void onNext(String token) {
                messagingTemplate.convertAndSend("/topic/documents/llmStreamingResponse", new LlmResponse(token));
            }

            @Override
            public void onComplete(Response<AiMessage> response) {
                futureAiMessage.complete(response.content());
            }

            @Override
            public void onError(Throwable error) {
                error.printStackTrace();
            }
        };

        streamingChatModel.generate(chatMemory.messages(), handler);

        // Blocks until onComplete fires; the full reply is then kept in memory.
        chatMemory.add(futureAiMessage.get());

        // Distinct source file names of the retrieved segments.
        Set<String> files = queryResponse.metadatas().get(0).stream()
                .map(map -> map.get("file_name"))
                .collect(Collectors.toSet());

        return new StreamingLlmResponse(chatMemory.messages(), documents, files);
    }

    /** Clears the shared conversation history. */
    public void reset() {
        chatMemory.clear();
    }

    // Converts Chroma's column-oriented query response (parallel lists of ids,
    // distances, embeddings, documents) into row-oriented EmbeddingMatch objects.
    private static List<EmbeddingMatch<TextSegment>> toEmbeddingMatches(QueryResponse queryResponse) {
        List<EmbeddingMatch<TextSegment>> embeddingMatches = new ArrayList<>();

        for(int i = 0; i < queryResponse.ids().get(0).size(); ++i) {
            double score = distanceToScore((Double)((List<?>)queryResponse.distances().get(0)).get(i));
            String embeddingId = (String)((List<?>)queryResponse.ids().get(0)).get(i);
            Embedding embedding = Embedding.from((List)((List<?>)queryResponse.embeddings().get(0)).get(i));
            TextSegment textSegment = toTextSegment(queryResponse, i);
            embeddingMatches.add(new EmbeddingMatch<>(score, embeddingId, embedding, textSegment));
        }

        return embeddingMatches;
    }

    // Maps a Chroma distance to a relevance score: distance 0 -> 1.0, distance 2 -> 0.0.
    private static double distanceToScore(double distance) {
        return 1.0 - distance / 2.0;
    }

    // Builds the i-th TextSegment from the response; returns null when the
    // document text itself is null. A null metadata map becomes empty Metadata.
    private static TextSegment toTextSegment(QueryResponse queryResponse, int i) {
        String text = (String)((List<?>)queryResponse.documents().get(0)).get(i);
        Map<String, String> metadata = (Map)((List<?>)queryResponse.metadatas().get(0)).get(i);
        return text == null ? null : TextSegment.from(text, metadata == null ? new Metadata() : new Metadata(metadata));
    }
}
[ "dev.langchain4j.model.input.PromptTemplate.from" ]
[((3866, 3988), 'dev.langchain4j.model.input.PromptTemplate.from'), ((3866, 3953), 'dev.langchain4j.model.input.PromptTemplate.from')]
package com.kchandrakant;

import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;

import java.io.File;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.time.Duration.ofSeconds;
import static java.util.stream.Collectors.joining;

/**
 * Retrieval-augmented "chat with your documents" example: splits a document
 * into segments, embeds them into an in-memory store, retrieves the segments
 * most relevant to a question, and asks an OpenAI chat model to answer from
 * those segments.
 */
public class ChatWithDocuments {

    public static void main(String[] args) {
        // Load the document that includes the information you'd like to "chat" about with the model.
        Document document = loadDocument(toPath("src/main/resources/example-files/simpson's_adventures.txt"));

        // Split document into segments of roughly 100 tokens each (no overlap).
        DocumentSplitter splitter = DocumentSplitters.recursive(
                100,
                0,
                new OpenAiTokenizer(GPT_3_5_TURBO)
        );
        List<TextSegment> segments = splitter.split(document);

        // Embed segments (convert them into vectors that represent the meaning) using embedding model.
        EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
        List<Embedding> embeddings = embeddingModel.embedAll(segments).content();

        // Store embeddings into embedding store for further search / retrieval.
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        embeddingStore.addAll(embeddings, segments);

        // The question to ask, embedded with the same model as the segments.
        String question = "Who is Simpson?";
        Embedding questionEmbedding = embeddingModel.embed(question).content();

        // Find relevant embeddings in embedding store by semantic similarity.
        // Tune maxResults / minScore for your specific use case.
        int maxResults = 3;
        double minScore = 0.7;
        List<EmbeddingMatch<TextSegment>> relevantEmbeddings =
                embeddingStore.findRelevant(questionEmbedding, maxResults, minScore);

        // Build a prompt containing the question and the retrieved context.
        PromptTemplate promptTemplate = PromptTemplate.from(
                "Answer the following question to the best of your ability:\n"
                        + "\n"
                        + "Question:\n"
                        + "{{question}}\n"
                        + "\n"
                        + "Base your answer on the following information:\n"
                        + "{{information}}");

        String information = relevantEmbeddings.stream()
                .map(match -> match.embedded().text())
                .collect(joining("\n\n"));

        Map<String, Object> variables = new HashMap<>();
        variables.put("question", question);
        variables.put("information", information);

        Prompt prompt = promptTemplate.apply(variables);

        // Send the prompt to the OpenAI chat model.
        ChatLanguageModel chatModel = OpenAiChatModel.builder()
                .apiKey(ApiKeys.OPENAI_API_KEY)
                .timeout(ofSeconds(60))
                .build();

        AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();

        // Print the model's answer.
        String answer = aiMessage.text();
        System.out.println(answer);
    }

    /**
     * Converts a file name to a {@link Path} via its {@code file:} URL.
     *
     * FIX: the original computed {@code new File(fileName).toURI().toURL()}
     * twice and printed the second result to stdout — leftover debug noise;
     * the conversion is now done once with no side effects.
     */
    private static Path toPath(String fileName) {
        try {
            URL fileUrl = new File(fileName).toURI().toURL();
            return Paths.get(fileUrl.toURI());
        } catch (URISyntaxException | MalformedURLException e) {
            throw new RuntimeException(e);
        }
    }
}
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((4007, 4145), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4007, 4120), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4007, 4080), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import org.testcontainers.containers.PostgreSQLContainer; import org.testcontainers.utility.DockerImageName; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore; import java.util.List; public class PGVectorEmbeddingStoreExample { public static void main(String[] args) { PostgreSQLContainer<?> postgreSQLContainer = new PostgreSQLContainer<>( DockerImageName.parse("ankane/pgvector:v0.5.1").asCompatibleSubstituteFor("postgres")); postgreSQLContainer.start(); EmbeddingStore<TextSegment> embeddingStore = PgVectorEmbeddingStore.builder() .host(postgreSQLContainer.getHost()) .port(postgreSQLContainer.getFirstMappedPort()) .database(postgreSQLContainer.getDatabaseName()) .user(postgreSQLContainer.getUsername()) .password(postgreSQLContainer.getPassword()) .table("test") .dimension(384) .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); TextSegment segment1 = TextSegment.from("I like football."); Embedding embedding1 = embeddingModel.embed(segment1).content(); embeddingStore.add(embedding1, segment1); TextSegment segment2 = TextSegment.from("The weather is good today."); Embedding embedding2 = embeddingModel.embed(segment2).content(); embeddingStore.add(embedding2, segment2); Embedding queryEmbedding = embeddingModel.embed("What is your favourite sport?").content(); List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1); EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0); System.out.println(embeddingMatch.score()); // 0.8144288659095 System.out.println(embeddingMatch.embedded().text()); // I like football. postgreSQLContainer.stop(); } }
[ "dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder" ]
[((744, 829), 'org.testcontainers.utility.DockerImageName.parse'), ((926, 1354), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1328), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1295), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1263), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1201), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1143), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1077), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1012), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder')]
import dev.langchain4j.code.Judge0JavaScriptExecutionTool; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import static java.time.Duration.ofSeconds; public class ServiceWithDynamicToolsExample { interface Assistant { String chat(String message); } public static void main(String[] args) { Judge0JavaScriptExecutionTool judge0Tool = new Judge0JavaScriptExecutionTool(ApiKeys.RAPID_API_KEY); ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .temperature(0.0) .timeout(ofSeconds(60)) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .tools(judge0Tool) .build(); interact(assistant, "What is the square root of 49506838032859?"); interact(assistant, "Capitalize every third letter: abcabc"); interact(assistant, "What is the number of hours between 17:00 on 21 Feb 1988 and 04:00 on 12 Apr 2014?"); } private static void interact(Assistant assistant, String userMessage) { System.out.println("[User]: " + userMessage); String answer = assistant.chat(userMessage); System.out.println("[Assistant]: " + answer); System.out.println(); System.out.println(); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((634, 806), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 781), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 741), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 707), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((839, 1061), 'dev.langchain4j.service.AiServices.builder'), ((839, 1036), 'dev.langchain4j.service.AiServices.builder'), ((839, 1001), 'dev.langchain4j.service.AiServices.builder'), ((839, 928), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.MemoryId; import dev.langchain4j.service.UserMessage; public class ServiceWithMemoryForEachUserExample { interface Assistant { String chat(@MemoryId int memoryId, @UserMessage String userMessage); } public static void main(String[] args) { Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(10)) .build(); System.out.println(assistant.chat(1, "Hello, my name is Klaus")); // Hi Klaus! How can I assist you today? System.out.println(assistant.chat(2, "Hello, my name is Francine")); // Hello Francine! How can I assist you today? System.out.println(assistant.chat(1, "What is my name?")); // Your name is Klaus. System.out.println(assistant.chat(2, "What is my name?")); // Your name is Francine. } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((482, 722), 'dev.langchain4j.service.AiServices.builder'), ((482, 697), 'dev.langchain4j.service.AiServices.builder'), ((482, 604), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore; import java.util.List; import static dev.langchain4j.internal.Utils.randomUUID; public class ChromaEmbeddingStoreExample { /** * To run this example, ensure you have Chroma running locally. If not, then: * - Execute "docker pull ghcr.io/chroma-core/chroma:0.4.6" * - Execute "docker run -d -p 8000:8000 ghcr.io/chroma-core/chroma:0.4.6" * - Wait until Chroma is ready to serve (may take a few minutes) */ public static void main(String[] args) { EmbeddingStore<TextSegment> embeddingStore = ChromaEmbeddingStore.builder() .baseUrl("http://localhost:8000") .collectionName(randomUUID()) .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); TextSegment segment1 = TextSegment.from("I like football."); Embedding embedding1 = embeddingModel.embed(segment1).content(); embeddingStore.add(embedding1, segment1); TextSegment segment2 = TextSegment.from("The weather is good today."); Embedding embedding2 = embeddingModel.embed(segment2).content(); embeddingStore.add(embedding2, segment2); Embedding queryEmbedding = embeddingModel.embed("What is your favourite sport?").content(); List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1); EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0); System.out.println(embeddingMatch.score()); // 0.8144288493114709 System.out.println(embeddingMatch.embedded().text()); // I like football. } }
[ "dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder" ]
[((937, 1088), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder'), ((937, 1063), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder'), ((937, 1017), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder')]
import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import org.mapdb.DB; import org.mapdb.DBMaker; import java.util.List; import java.util.Map; import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson; import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson; import static org.mapdb.Serializer.STRING; public class ServiceWithPersistentMemoryExample { interface Assistant { String chat(String message); } public static void main(String[] args) { ChatMemory chatMemory = MessageWindowChatMemory.builder() .maxMessages(10) .chatMemoryStore(new PersistentChatMemoryStore()) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .chatMemory(chatMemory) .build(); String answer = assistant.chat("Hello! My name is Klaus."); System.out.println(answer); // Hello Klaus! How can I assist you today? // Now, comment out the two lines above, uncomment the two lines below, and run again. // String answerWithName = assistant.chat("What is my name?"); // System.out.println(answerWithName); // Your name is Klaus. 
} // You can create your own implementation of ChatMemoryStore and store chat memory whenever you'd like static class PersistentChatMemoryStore implements ChatMemoryStore { private final DB db = DBMaker.fileDB("chat-memory.db").transactionEnable().make(); private final Map<String, String> map = db.hashMap("messages", STRING, STRING).createOrOpen(); @Override public List<ChatMessage> getMessages(Object memoryId) { String json = map.get((String) memoryId); return messagesFromJson(json); } @Override public void updateMessages(Object memoryId, List<ChatMessage> messages) { String json = messagesToJson(messages); map.put((String) memoryId, json); db.commit(); } @Override public void deleteMessages(Object memoryId) { map.remove((String) memoryId); db.commit(); } } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((809, 966), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 941), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 875), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((999, 1186), 'dev.langchain4j.service.AiServices.builder'), ((999, 1161), 'dev.langchain4j.service.AiServices.builder'), ((999, 1121), 'dev.langchain4j.service.AiServices.builder'), ((1792, 1851), 'org.mapdb.DBMaker.fileDB'), ((1792, 1844), 'org.mapdb.DBMaker.fileDB')]
/* * Copyright 2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package services.ai; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.ImageContent; import dev.langchain4j.data.message.SystemMessage; import dev.langchain4j.data.message.TextContent; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; import dev.langchain4j.service.AiServices; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; @Service public class VertexAIClient { private static final Logger logger = LoggerFactory.getLogger(VertexAIClient.class); @Value("${spring.ai.vertex.ai.gemini.project-id}") private String project; @Value("${spring.ai.vertex.ai.gemini.location}") private String location; public String promptOnImage(String prompt, String bucketName, String fileName) throws IOException { long start = System.currentTimeMillis(); // bucket where image has been uploaded String imageURL = String.format("gs://%s/%s",bucketName, fileName); UserMessage userMessage = UserMessage.from( // 
ImageContent.from(Base64.getEncoder().encodeToString(readBytes("https://storage.googleapis.com/vision-optimize-serverless-apps/TheJungleBook.jpg")), "image/jpeg"), ImageContent.from(imageURL), TextContent.from(prompt) ); ChatLanguageModel visionModel = VertexAiGeminiChatModel.builder() .project(project) .location(location) .modelName(VertexModels.GEMINI_PRO_VISION) .build(); Response<AiMessage> multiModalResponse = visionModel.generate(userMessage); String response = multiModalResponse.content().text(); logger.info("Multi-modal response: " + response); // response from Vertex is in Markdown, remove annotations response = response.replaceAll("```json", "").replaceAll("```", "").replace("'", "\""); logger.info("Elapsed time (chat model): " + (System.currentTimeMillis() - start) + "ms"); // return the response in String format, extract values in caller return response; } public String promptModel(String prompt) { long start = System.currentTimeMillis(); logger.info("Chat model: " + prompt); ChatLanguageModel model = VertexAiGeminiChatModel.builder() .project(project) .location(location) .modelName(VertexModels.GEMINI_PRO) .build(); // prompt Chat model String output = model.generate(prompt); logger.info("Elapsed time (chat model, with SpringAI): " + (System.currentTimeMillis() - start) + "ms"); logger.info("Chat Model output: " + output); // return model response in String format return output; } interface Assistant { String chat(UserMessage userMessage); } public String promptModelwithFunctionCalls(SystemMessage systemMessage, UserMessage userMessage, Object function) { long start = System.currentTimeMillis(); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); // chatMemory.add(systemMessage); ChatLanguageModel model = VertexAiGeminiChatModel.builder() .project(project) .location(location) .modelName(VertexModels.GEMINI_PRO) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(model) .chatMemory(chatMemory) 
.tools(function) .build(); String output = assistant.chat(userMessage); logger.info("Elapsed time (chat model, with Langchain4J): " + (System.currentTimeMillis() - start) + "ms"); logger.info("Chat Model output with Function Call: " + output); // return model response in String format return output; } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder" ]
[((2356, 2527), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2506), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2451), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2419), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3446), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3425), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3377), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3345), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4446), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4425), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4377), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4345), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4479, 4638), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4617), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4588), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4552), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.model.StreamingResponseHandler; import dev.langchain4j.model.openai.OpenAiStreamingChatModel; import java.util.ArrayList; import java.util.List; import static java.time.Duration.ofSeconds; public class _06_FewShot { public static void main(String[] args) { OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .timeout(ofSeconds(100)) .build(); List<ChatMessage> fewShotHistory = new ArrayList<>(); // Adding positive feedback example to history fewShotHistory.add(UserMessage.from( "I love the new update! The interface is very user-friendly and the new features are amazing!")); fewShotHistory.add(AiMessage.from( "Action: forward input to positive feedback storage\nReply: Thank you very much for this great feedback! We have transmitted your message to our product development team who will surely be very happy to hear this. We hope you continue enjoying using our product.")); // Adding negative feedback example to history fewShotHistory.add(UserMessage .from("I am facing frequent crashes after the new update on my Android device.")); fewShotHistory.add(AiMessage.from( "Action: open new ticket - crash after update Android\nReply: We are so sorry to hear about the issues you are facing. We have reported the problem to our development team and will make sure this issue is addressed as fast as possible. We will send you an email when the fix is done, and we are always at your service for any further assistance you may need.")); // Adding another positive feedback example to history fewShotHistory.add(UserMessage .from("Your app has made my daily tasks so much easier! Kudos to the team!")); fewShotHistory.add(AiMessage.from( "Action: forward input to positive feedback storage\nReply: Thank you so much for your kind words! 
We are thrilled to hear that our app is making your daily tasks easier. Your feedback has been shared with our team. We hope you continue to enjoy using our app!")); // Adding another negative feedback example to history fewShotHistory.add(UserMessage .from("The new feature is not working as expected. It’s causing data loss.")); fewShotHistory.add(AiMessage.from( "Action: open new ticket - data loss by new feature\nReply:We apologize for the inconvenience caused. Your feedback is crucial to us, and we have reported this issue to our technical team. They are working on it on priority. We will keep you updated on the progress and notify you once the issue is resolved. Thank you for your patience and support.")); // Adding real user's message ChatMessage customerComplaint = UserMessage .from("How can your app be so slow? Please do something about it!"); fewShotHistory.add(customerComplaint); System.out.println("[User]: " + customerComplaint.text()); System.out.print("[LLM]: "); model.generate(fewShotHistory, new StreamingResponseHandler<AiMessage>() { @Override public void onNext(String token) { System.out.print(token); } @Override public void onError(Throwable throwable) { } }); // Extract reply and send to customer // Perform necessary action in back-end } }
[ "dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder" ]
[((475, 623), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((475, 598), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((475, 557), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder')]
import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.structured.StructuredPrompt; import dev.langchain4j.model.input.structured.StructuredPromptProcessor; import dev.langchain4j.model.openai.OpenAiChatModel; import java.util.List; import static java.time.Duration.ofSeconds; import static java.util.Arrays.asList; public class StructuredPromptTemplateExamples { static ChatLanguageModel model = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); static class Simple_Structured_Prompt_Example { @StructuredPrompt("Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}") static class CreateRecipePrompt { private String dish; private List<String> ingredients; } public static void main(String[] args) { CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt(); createRecipePrompt.dish = "salad"; createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives"); Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt); AiMessage aiMessage = model.generate(prompt.toUserMessage()).content(); System.out.println(aiMessage.text()); } } static class Multi_Line_Structured_Prompt_Example { @StructuredPrompt({ "Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}.", "Structure your answer in the following way:", "Recipe name: ...", "Description: ...", "Preparation time: ...", "Required ingredients:", "- ...", "- ...", "Instructions:", "- ...", "- ..." 
}) static class CreateRecipePrompt { private String dish; private List<String> ingredients; } public static void main(String[] args) { CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt(); createRecipePrompt.dish = "salad"; createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives"); Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt); AiMessage aiMessage = model.generate(prompt.toUserMessage()).content(); System.out.println(aiMessage.text()); } } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((528, 654), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 633), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 597), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package zin.rashidi.boot.langchain4j.history; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.env.Environment; import static dev.langchain4j.memory.chat.MessageWindowChatMemory.withMaxMessages; /** * @author Rashidi Zin */ @Configuration class HistorianConfiguration { @Bean Historian historian(ChatLanguageModel model, ContentRetriever retriever, HistorianTool tool) { return AiServices.builder(Historian.class) .chatLanguageModel(model) .chatMemory(withMaxMessages(10)) .contentRetriever(retriever) .tools(tool) .build(); } @Bean ContentRetriever retriever(EmbeddingStore<TextSegment> embeddingStore) { return EmbeddingStoreRetriever.from(embeddingStore, new AllMiniLmL6V2EmbeddingModel(), 1, 0.6) .toContentRetriever(); } @Bean EmbeddingStore<TextSegment> embeddingStore(Environment environment) { return ElasticsearchEmbeddingStore.builder() .serverUrl(environment.getProperty("app.elasticsearch.uri")) .indexName("history") .build(); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.retriever.EmbeddingStoreRetriever.from", "dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder" ]
[((967, 1192), 'dev.langchain4j.service.AiServices.builder'), ((967, 1167), 'dev.langchain4j.service.AiServices.builder'), ((967, 1138), 'dev.langchain4j.service.AiServices.builder'), ((967, 1093), 'dev.langchain4j.service.AiServices.builder'), ((967, 1044), 'dev.langchain4j.service.AiServices.builder'), ((1303, 1428), 'dev.langchain4j.retriever.EmbeddingStoreRetriever.from'), ((1536, 1713), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1536, 1688), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1536, 1650), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder')]
import dev.langchain4j.model.openai.OpenAiChatModel; import java.net.InetSocketAddress; import java.net.Proxy; import static java.net.Proxy.Type.HTTP; public class ProxyExample { public static void main(String[] args) { OpenAiChatModel model = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .proxy(new Proxy(HTTP, new InetSocketAddress("39.175.77.7", 30001))) .build(); String answer = model.generate("hello"); System.out.println(answer); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((261, 444), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 419), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 334), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import dev.langchain4j.chain.ConversationalChain; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.TokenWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import java.io.IOException; import static dev.langchain4j.data.message.UserMessage.userMessage; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; public class ChatMemoryExamples { // See also ServiceWithMemoryExample and ServiceWithMemoryForEachUserExample public static class ConversationalChain_Example { public static void main(String[] args) throws IOException { ConversationalChain chain = ConversationalChain.builder() .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) // .chatMemory() // you can override default chat memory .build(); String answer = chain.execute("Hello, my name is Klaus"); System.out.println(answer); // Hello Klaus! How can I assist you today? String answerWithName = chain.execute("What is my name?"); System.out.println(answerWithName); // Your name is Klaus. } } public static class If_You_Need_More_Control { public static void main(String[] args) { ChatLanguageModel model = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY); ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(300, new OpenAiTokenizer(GPT_3_5_TURBO)); // You have full control over the chat memory. // You can decide if you want to add a particular message to the memory // (e.g. you might not want to store few-shot examples to save on tokens). // You can process/modify the message before saving if required. chatMemory.add(userMessage("Hello, my name is Klaus")); AiMessage answer = model.generate(chatMemory.messages()).content(); System.out.println(answer.text()); // Hello Klaus! How can I assist you today? 
chatMemory.add(answer); chatMemory.add(userMessage("What is my name?")); AiMessage answerWithName = model.generate(chatMemory.messages()).content(); System.out.println(answerWithName.text()); // Your name is Klaus. chatMemory.add(answerWithName); } } }
[ "dev.langchain4j.chain.ConversationalChain.builder" ]
[((810, 1036), 'dev.langchain4j.chain.ConversationalChain.builder'), ((810, 930), 'dev.langchain4j.chain.ConversationalChain.builder')]
package me.nzuguem.bot.configurations.llm; import dev.langchain4j.rag.DefaultRetrievalAugmentor; import dev.langchain4j.rag.RetrievalAugmentor; import jakarta.enterprise.context.ApplicationScoped; import java.util.function.Supplier; @ApplicationScoped public class GithubAppRetrievalAugmentor implements Supplier<RetrievalAugmentor> { private final GithubAppContentRetriever retriever; public GithubAppRetrievalAugmentor(GithubAppContentRetriever retriever) { this.retriever = retriever; } @Override public RetrievalAugmentor get() { return DefaultRetrievalAugmentor.builder() .contentRetriever(this.retriever) .build(); } }
[ "dev.langchain4j.rag.DefaultRetrievalAugmentor.builder" ]
[((583, 693), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((583, 668), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder')]
import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.util.stream.Collectors.joining; public class ChatWithDocumentsExamples { // Please also check ServiceWithRetrieverExample static class IfYouNeedSimplicity { public static void main(String[] args) throws Exception { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(300, 0)) 
.embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document document = loadDocument(toPath("example-files/story-about-happy-carrot.txt"), new TextDocumentParser()); ingestor.ingest(document); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) // .chatMemory() // you can override default chat memory // .promptTemplate() // you can override default prompt template .build(); String answer = chain.execute("Who is Charlie?"); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } static class If_You_Need_More_Control { public static void main(String[] args) { // Load the document that includes the information you'd like to "chat" about with the model. Document document = loadDocument(toPath("example-files/story-about-happy-carrot.txt"), new TextDocumentParser()); // Split document into segments 100 tokens each DocumentSplitter splitter = DocumentSplitters.recursive( 100, 0, new OpenAiTokenizer(GPT_3_5_TURBO) ); List<TextSegment> segments = splitter.split(document); // Embed segments (convert them into vectors that represent the meaning) using embedding model EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments).content(); // Store embeddings into embedding store for further search / retrieval EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); // Specify the question you want to ask the model String question = "Who is Charlie?"; // Embed the question Embedding questionEmbedding = embeddingModel.embed(question).content(); // Find relevant embeddings in embedding store by semantic similarity // You can play with parameters below to find a sweet spot for your specific use case int maxResults = 3; double 
minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); // Create a prompt for the model that includes question and relevant embeddings PromptTemplate promptTemplate = PromptTemplate.from( "Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded().text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); // Send the prompt to the OpenAI chat model ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .timeout(Duration.ofSeconds(60)) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content(); // See an answer from the model String answer = aiMessage.text(); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } private static Path toPath(String fileName) { try { URL fileUrl = ChatWithDocumentsExamples.class.getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1961, 2201), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2172), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2120), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2068), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2419, 2832), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2419, 2641), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2419, 2548), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((5821, 5980), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5951), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5898), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.rag.query.Query; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.MemoryId; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.filter.Filter; import dev.langchain4j.store.embedding.filter.builder.sql.LanguageModelSqlFilterBuilder; import dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.junit.jupiter.api.Test; import java.util.function.Function; import static dev.langchain4j.data.document.Metadata.metadata; import static dev.langchain4j.store.embedding.filter.MetadataFilterBuilder.metadataKey; import static org.assertj.core.api.Assertions.assertThat; class _06_Metadata_Filtering { /** * More information can be found here: https://github.com/langchain4j/langchain4j/pull/610 */ ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder() .apiKey(System.getenv("OPENAI_API_KEY")) .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); interface Assistant { String answer(String query); } @Test void Static_Metadata_Filter_Example() { // given TextSegment dogsSegment = TextSegment.from("Article about dogs ...", metadata("animal", "dog")); TextSegment birdsSegment = TextSegment.from("Article about birds ...", metadata("animal", "bird")); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.add(embeddingModel.embed(dogsSegment).content(), dogsSegment); 
embeddingStore.add(embeddingModel.embed(birdsSegment).content(), birdsSegment); // embeddingStore contains segments about both dogs and birds Filter onlyDogs = metadataKey("animal").isEqualTo("dog"); ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .filter(onlyDogs) // by specifying the static filter, we limit the search to segments only about dogs .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .contentRetriever(contentRetriever) .build(); // when String answer = assistant.answer("Which animal?"); // then assertThat(answer) .containsIgnoringCase("dog") .doesNotContainIgnoringCase("bird"); } interface PersonalizedAssistant { String chat(@MemoryId String userId, @dev.langchain4j.service.UserMessage String userMessage); } @Test void Dynamic_Metadata_Filter_Example() { // given TextSegment user1Info = TextSegment.from("My favorite color is green", metadata("userId", "1")); TextSegment user2Info = TextSegment.from("My favorite color is red", metadata("userId", "2")); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.add(embeddingModel.embed(user1Info).content(), user1Info); embeddingStore.add(embeddingModel.embed(user2Info).content(), user2Info); // embeddingStore contains information about both first and second user Function<Query, Filter> filterByUserId = (query) -> metadataKey("userId").isEqualTo(query.metadata().chatMemoryId().toString()); ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) // by specifying the dynamic filter, we limit the search to segments that belong only to the current user .dynamicFilter(filterByUserId) .build(); PersonalizedAssistant personalizedAssistant = AiServices.builder(PersonalizedAssistant.class) .chatLanguageModel(chatLanguageModel) 
.contentRetriever(contentRetriever) .build(); // when String answer1 = personalizedAssistant.chat("1", "Which color would be best for a dress?"); // then assertThat(answer1) .containsIgnoringCase("green") .doesNotContainIgnoringCase("red"); // when String answer2 = personalizedAssistant.chat("2", "Which color would be best for a dress?"); // then assertThat(answer2) .containsIgnoringCase("red") .doesNotContainIgnoringCase("green"); } @Test void LLM_generated_Metadata_Filter_Example() { // given TextSegment forrestGump = TextSegment.from("Forrest Gump", metadata("genre", "drama").put("year", 1994)); TextSegment groundhogDay = TextSegment.from("Groundhog Day", metadata("genre", "comedy").put("year", 1993)); TextSegment dieHard = TextSegment.from("Die Hard", metadata("genre", "action").put("year", 1998)); // describe metadata keys as if they were columns in the SQL table TableDefinition tableDefinition = TableDefinition.builder() .name("movies") .addColumn("genre", "VARCHAR", "one of: [comedy, drama, action]") .addColumn("year", "INT") .build(); LanguageModelSqlFilterBuilder sqlFilterBuilder = new LanguageModelSqlFilterBuilder(chatLanguageModel, tableDefinition); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.add(embeddingModel.embed(forrestGump).content(), forrestGump); embeddingStore.add(embeddingModel.embed(groundhogDay).content(), groundhogDay); embeddingStore.add(embeddingModel.embed(dieHard).content(), dieHard); ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .dynamicFilter(query -> sqlFilterBuilder.build(query)) // LLM will generate the filter dynamically .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .contentRetriever(contentRetriever) .build(); // when String answer = assistant.answer("Recommend me a good drama from 90s"); // then assertThat(answer) 
.containsIgnoringCase("Forrest Gump") .doesNotContainIgnoringCase("Groundhog Day") .doesNotContainIgnoringCase("Die Hard"); } }
[ "dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder", "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1356, 1455), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1356, 1434), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2328, 2607), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2498), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2464), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2416), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2640, 2806), 'dev.langchain4j.service.AiServices.builder'), ((2640, 2781), 'dev.langchain4j.service.AiServices.builder'), ((2640, 2729), 'dev.langchain4j.service.AiServices.builder'), ((3993, 4323), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4298), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4129), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4081), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((4380, 4558), 'dev.langchain4j.service.AiServices.builder'), ((4380, 4533), 'dev.langchain4j.service.AiServices.builder'), ((4380, 4481), 'dev.langchain4j.service.AiServices.builder'), ((5624, 5830), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5805), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5763), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5681), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((6344, 6620), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6551), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6480), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6432), 
'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6653, 6819), 'dev.langchain4j.service.AiServices.builder'), ((6653, 6794), 'dev.langchain4j.service.AiServices.builder'), ((6653, 6742), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiModerationModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.Moderate; import dev.langchain4j.service.ModerationException; public class ServiceWithAutoModerationExample { interface Chat { @Moderate String chat(String text); } public static void main(String[] args) { OpenAiModerationModel moderationModel = OpenAiModerationModel.withApiKey(ApiKeys.OPENAI_API_KEY); Chat chat = AiServices.builder(Chat.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .moderationModel(moderationModel) .build(); try { chat.chat("I WILL KILL YOU!!!"); } catch (ModerationException e) { System.out.println(e.getMessage()); // Text "I WILL KILL YOU!!!" violates content policy } } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((552, 744), 'dev.langchain4j.service.AiServices.builder'), ((552, 719), 'dev.langchain4j.service.AiServices.builder'), ((552, 669), 'dev.langchain4j.service.AiServices.builder')]
package dev.langchain4j.data.message; import dev.langchain4j.agent.tool.ToolExecutionRequest; import org.assertj.core.api.WithAssertions; import org.junit.jupiter.api.Test; class ToolExecutionResultMessageTest implements WithAssertions { @Test public void test_methods() { ToolExecutionResultMessage tm = new ToolExecutionResultMessage("id", "toolName", "text"); assertThat(tm.id()).isEqualTo("id"); assertThat(tm.toolName()).isEqualTo("toolName"); assertThat(tm.text()).isEqualTo("text"); assertThat(tm.type()).isEqualTo(ChatMessageType.TOOL_EXECUTION_RESULT); assertThat(tm) .hasToString( "ToolExecutionResultMessage " + "{ id = \"id\" toolName = \"toolName\" text = \"text\" }"); } @Test public void test_equals_hashCode() { ToolExecutionResultMessage t1 = new ToolExecutionResultMessage("id", "toolName", "text"); ToolExecutionResultMessage t2 = new ToolExecutionResultMessage("id", "toolName", "text"); ToolExecutionResultMessage t3 = new ToolExecutionResultMessage("foo", "toolName", "text"); ToolExecutionResultMessage t4 = new ToolExecutionResultMessage("foo", "toolName", "text"); assertThat(t1) .isEqualTo(t1) .isNotEqualTo(null) .isNotEqualTo(new Object()) .isEqualTo(t2) .hasSameHashCodeAs(t2) .isNotEqualTo(ToolExecutionResultMessage.from( "changed", "toolName", "text")) .isNotEqualTo(ToolExecutionResultMessage.from( "id", "changed", "text")) .isNotEqualTo(ToolExecutionResultMessage.from( "id", "toolName", "changed")) .isNotEqualTo(t3) .doesNotHaveSameHashCodeAs(t3); assertThat(t3) .isEqualTo(t3) .isEqualTo(t4) .hasSameHashCodeAs(t4); } @Test public void test_builders() { ToolExecutionRequest request = ToolExecutionRequest.builder() .id("id") .name("toolName") .arguments("arguments") .build(); assertThat(new ToolExecutionResultMessage("id", "toolName", "text")) .isEqualTo(ToolExecutionResultMessage.from("id", "toolName", "text")) .isEqualTo(ToolExecutionResultMessage.from(request, "text")) .isEqualTo(ToolExecutionResultMessage.toolExecutionResultMessage( "id", "toolName", 
"text")) .isEqualTo(ToolExecutionResultMessage.toolExecutionResultMessage(request, "text")); } }
[ "dev.langchain4j.agent.tool.ToolExecutionRequest.builder" ]
[((2103, 2258), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2233), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2193), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2159), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')]
package dev.langchain4j.service; import dev.langchain4j.agent.tool.P; import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.agent.tool.ToolExecutionRequest; import dev.langchain4j.agent.tool.ToolSpecification; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.data.message.ToolExecutionResultMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.output.TokenUsage; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Spy; import org.mockito.junit.jupiter.MockitoExtension; import java.util.Arrays; import java.util.List; import static dev.langchain4j.agent.tool.JsonSchemaProperty.description; import static dev.langchain4j.agent.tool.JsonSchemaProperty.*; import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_3_5_TURBO_0613; import static dev.langchain4j.model.output.FinishReason.STOP; import static dev.langchain4j.service.AiServicesWithToolsIT.TemperatureUnit.Kelvin; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.data.Percentage.withPercentage; import static org.mockito.Mockito.*; @ExtendWith(MockitoExtension.class) class AiServicesWithToolsIT { @Spy ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .temperature(0.0) .logRequests(true) .logResponses(true) .build(); @AfterEach void afterEach() { verifyNoMoreInteractions(chatLanguageModel); 
} interface Assistant { Response<AiMessage> chat(String userMessage); } static class Calculator { static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder() .name("squareRoot") .description("calculates the square root of the provided number") .addParameter("arg0", NUMBER, description("number to operate on")) .build(); @Tool("calculates the square root of the provided number") double squareRoot(@P("number to operate on") double number) { System.out.printf("called squareRoot(%s)%n", number); return Math.sqrt(number); } } @Test void should_execute_a_tool_then_answer() { Calculator calculator = spy(new Calculator()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(calculator) .build(); String userMessage = "What is the square root of 485906798473894056 in scientific notation?"; Response<AiMessage> response = assistant.chat(userMessage); assertThat(response.content().text()).contains("6.97"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(72 + 109); assertThat(tokenUsage.outputTokenCount()).isCloseTo(20 + 31, withPercentage(5)); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(STOP); verify(calculator).squareRoot(485906798473894056.0); verifyNoMoreInteractions(calculator); List<ChatMessage> messages = chatMemory.messages(); assertThat(messages).hasSize(4); assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class); assertThat(messages.get(0).text()).isEqualTo(userMessage); AiMessage aiMessage = (AiMessage) messages.get(1); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(1); ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0); 
assertThat(toolExecutionRequest.id()).isNotBlank(); assertThat(toolExecutionRequest.name()).isEqualTo("squareRoot"); assertThat(toolExecutionRequest.arguments()) .isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}"); ToolExecutionResultMessage toolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2); assertThat(toolExecutionResultMessage.id()).isEqualTo(toolExecutionRequest.id()); assertThat(toolExecutionResultMessage.toolName()).isEqualTo("squareRoot"); assertThat(toolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8"); assertThat(messages.get(3)).isInstanceOf(AiMessage.class); assertThat(messages.get(3).text()).contains("6.97"); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); } @Test void should_execute_multiple_tools_sequentially_then_answer() { ChatLanguageModel chatLanguageModel = spy(OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .modelName(GPT_3_5_TURBO_0613) // this model can only call tools sequentially .temperature(0.0) .logRequests(true) .logResponses(true) .build()); Calculator calculator = spy(new Calculator()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(calculator) .build(); String userMessage = "What is the square root of 485906798473894056 and 97866249624785 in scientific notation?"; Response<AiMessage> response = assistant.chat(userMessage); assertThat(response.content().text()).contains("6.97", "9.89"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(79 + 117 + 152); 
assertThat(tokenUsage.outputTokenCount()).isCloseTo(21 + 20 + 53, withPercentage(5)); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(STOP); verify(calculator).squareRoot(485906798473894056.0); verify(calculator).squareRoot(97866249624785.0); verifyNoMoreInteractions(calculator); List<ChatMessage> messages = chatMemory.messages(); assertThat(messages).hasSize(6); assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class); assertThat(messages.get(0).text()).isEqualTo(userMessage); AiMessage aiMessage = (AiMessage) messages.get(1); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(1); ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0); assertThat(toolExecutionRequest.id()).isNotBlank(); assertThat(toolExecutionRequest.name()).isEqualTo("squareRoot"); assertThat(toolExecutionRequest.arguments()) .isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}"); ToolExecutionResultMessage toolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2); assertThat(toolExecutionResultMessage.id()).isEqualTo(toolExecutionRequest.id()); assertThat(toolExecutionResultMessage.toolName()).isEqualTo("squareRoot"); assertThat(toolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8"); AiMessage secondAiMessage = (AiMessage) messages.get(3); assertThat(secondAiMessage.text()).isNull(); assertThat(secondAiMessage.toolExecutionRequests()).hasSize(1); ToolExecutionRequest secondToolExecutionRequest = secondAiMessage.toolExecutionRequests().get(0); assertThat(secondToolExecutionRequest.id()).isNotBlank(); assertThat(secondToolExecutionRequest.name()).isEqualTo("squareRoot"); assertThat(secondToolExecutionRequest.arguments()) .isEqualToIgnoringWhitespace("{\"arg0\": 97866249624785}"); ToolExecutionResultMessage secondToolExecutionResultMessage = 
(ToolExecutionResultMessage) messages.get(4); assertThat(secondToolExecutionResultMessage.id()).isEqualTo(secondToolExecutionRequest.id()); assertThat(secondToolExecutionResultMessage.toolName()).isEqualTo("squareRoot"); assertThat(secondToolExecutionResultMessage.text()).isEqualTo("9892737.215997653"); assertThat(messages.get(5)).isInstanceOf(AiMessage.class); assertThat(messages.get(5).text()).contains("6.97", "9.89"); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2), messages.get(3), messages.get(4)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); } @Test void should_execute_multiple_tools_in_parallel_then_answer() { Calculator calculator = spy(new Calculator()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(calculator) .build(); String userMessage = "What is the square root of 485906798473894056 and 97866249624785 in scientific notation?"; Response<AiMessage> response = assistant.chat(userMessage); assertThat(response.content().text()).contains("6.97", "9.89"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(79 + 160); assertThat(tokenUsage.outputTokenCount()).isCloseTo(54 + 58, withPercentage(5)); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(STOP); verify(calculator).squareRoot(485906798473894056.0); verify(calculator).squareRoot(97866249624785.0); verifyNoMoreInteractions(calculator); List<ChatMessage> messages = chatMemory.messages(); 
assertThat(messages).hasSize(5); assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class); assertThat(messages.get(0).text()).isEqualTo(userMessage); AiMessage aiMessage = (AiMessage) messages.get(1); assertThat(aiMessage.text()).isNull(); assertThat(aiMessage.toolExecutionRequests()).hasSize(2); ToolExecutionRequest firstToolExecutionRequest = aiMessage.toolExecutionRequests().get(0); assertThat(firstToolExecutionRequest.id()).isNotBlank(); assertThat(firstToolExecutionRequest.name()).isEqualTo("squareRoot"); assertThat(firstToolExecutionRequest.arguments()) .isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}"); ToolExecutionRequest secondToolExecutionRequest = aiMessage.toolExecutionRequests().get(1); assertThat(secondToolExecutionRequest.id()).isNotBlank(); assertThat(secondToolExecutionRequest.name()).isEqualTo("squareRoot"); assertThat(secondToolExecutionRequest.arguments()) .isEqualToIgnoringWhitespace("{\"arg0\": 97866249624785}"); ToolExecutionResultMessage firstToolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2); assertThat(firstToolExecutionResultMessage.id()).isEqualTo(firstToolExecutionRequest.id()); assertThat(firstToolExecutionResultMessage.toolName()).isEqualTo("squareRoot"); assertThat(firstToolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8"); ToolExecutionResultMessage secondToolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(3); assertThat(secondToolExecutionResultMessage.id()).isEqualTo(secondToolExecutionRequest.id()); assertThat(secondToolExecutionResultMessage.toolName()).isEqualTo("squareRoot"); assertThat(secondToolExecutionResultMessage.text()).isEqualTo("9892737.215997653"); assertThat(messages.get(4)).isInstanceOf(AiMessage.class); assertThat(messages.get(4).text()).contains("6.97", "9.89"); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( 
asList(messages.get(0), messages.get(1), messages.get(2), messages.get(3)), singletonList(Calculator.EXPECTED_SPECIFICATION) ); } static class StringListProcessor { static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder() .name("processStrings") .description("Processes list of strings") .addParameter("arg0", ARRAY, items(STRING), description("List of strings to process")) .build(); @Tool("Processes list of strings") void processStrings(@P("List of strings to process") List<String> strings) { System.out.printf("called processStrings(%s)%n", strings); } } @Test void should_use_tool_with_List_of_Strings_parameter() { StringListProcessor stringListProcessor = spy(new StringListProcessor()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(stringListProcessor) .build(); String userMessage = "Process strings 'cat' and 'dog' together, do not separate them!"; // when assistant.chat(userMessage); // then verify(stringListProcessor).processStrings(asList("cat", "dog")); verifyNoMoreInteractions(stringListProcessor); List<ChatMessage> messages = chatMemory.messages(); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(StringListProcessor.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(StringListProcessor.EXPECTED_SPECIFICATION) ); } static class IntegerListProcessor { static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder() .name("processIntegers") .description("Processes list of integers") .addParameter("arg0", ARRAY, items(INTEGER), description("List of integers to process")) .build(); @Tool("Processes list of integers") void processIntegers(@P("List of integers to process") List<Integer> integers) { System.out.printf("called processIntegers(%s)%n", integers); } } @Test 
@Disabled // TODO fix: should automatically convert List<Double> into List<Integer> void should_use_tool_with_List_of_Integers_parameter() { IntegerListProcessor integerListProcessor = spy(new IntegerListProcessor()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(integerListProcessor) .build(); String userMessage = "Process integers 1 and 2 together, do not separate them!"; // when assistant.chat(userMessage); // then verify(integerListProcessor).processIntegers(asList(1, 2)); verifyNoMoreInteractions(integerListProcessor); List<ChatMessage> messages = chatMemory.messages(); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(IntegerListProcessor.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(IntegerListProcessor.EXPECTED_SPECIFICATION) ); } static class StringArrayProcessor { static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder() .name("processStrings") .description("Processes array of strings") .addParameter("arg0", ARRAY, items(STRING), description("Array of strings to process")) .build(); @Tool("Processes array of strings") void processStrings(@P("Array of strings to process") String[] ids) { System.out.printf("called processStrings(%s)%n", Arrays.toString(ids)); } } @Test @Disabled // TODO fix: should automatically convert List<String> into String[] void should_use_tool_with_Array_of_Strings_parameter() { StringArrayProcessor stringArrayProcessor = spy(new StringArrayProcessor()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(stringArrayProcessor) .build(); String userMessage = "Process strings 'cat' and 'dog' together, do not 
separate them!"; // when assistant.chat(userMessage); // then verify(stringArrayProcessor).processStrings(new String[]{"cat", "dog"}); verifyNoMoreInteractions(stringArrayProcessor); List<ChatMessage> messages = chatMemory.messages(); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(StringArrayProcessor.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(StringArrayProcessor.EXPECTED_SPECIFICATION) ); } static class WeatherService { static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder() .name("currentTemperature") .description("") // TODO should be null? .addParameter("arg0", STRING) .addParameter("arg1", STRING, from("enum", asList("CELSIUS", "fahrenheit", "Kelvin"))) .build(); @Tool int currentTemperature(String city, TemperatureUnit unit) { System.out.printf("called currentTemperature(%s, %s)%n", city, unit); return 42; } } enum TemperatureUnit { CELSIUS, fahrenheit, Kelvin } @Test void should_use_tool_with_enum_parameter() { // given WeatherService weatherService = spy(new WeatherService()); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .tools(weatherService) .build(); // when Response<AiMessage> response = assistant.chat("What is the temperature in Munich now, in kelvin?"); // then assertThat(response.content().text()).contains("42"); verify(weatherService).currentTemperature("Munich", Kelvin); verifyNoMoreInteractions(weatherService); List<ChatMessage> messages = chatMemory.messages(); verify(chatLanguageModel).generate( singletonList(messages.get(0)), singletonList(WeatherService.EXPECTED_SPECIFICATION) ); verify(chatLanguageModel).generate( asList(messages.get(0), messages.get(1), messages.get(2)), singletonList(WeatherService.EXPECTED_SPECIFICATION) ); } // TODO test Lists, 
Sets, Arrays of different types (including enums). }
[ "dev.langchain4j.agent.tool.ToolSpecification.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1704, 2020), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1999), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1967), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1936), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1906), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1837), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1784), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2298, 2551), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2526), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2443), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2361), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((5791, 6229), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6204), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6168), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6133), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6052), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6005), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 5932), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 5875), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((14240, 14493), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14468), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14365), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14307), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16277), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16252), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16147), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16088), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18157), 
'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18132), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18028), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 17969), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 20093), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 20068), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19965), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19895), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19862), 'dev.langchain4j.agent.tool.ToolSpecification.builder')]
package dev.langchain4j.rag.query.transformer;

import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.rag.query.Metadata;
import dev.langchain4j.rag.query.Query;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import java.util.Collection;
import java.util.List;
import java.util.stream.Stream;

import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;

/**
 * Integration test for {@link CompressingQueryTransformer}: verifies that a follow-up
 * question plus the preceding chat history are compressed into one standalone query.
 */
class CompressingQueryTransformerIT {

    @ParameterizedTest
    @MethodSource("models")
    void should_compress_query_and_chat_memory_into_single_query(ChatLanguageModel model) {

        // given: a short conversation where the follow-up question ("he") only
        // makes sense in the context of the earlier messages
        List<ChatMessage> history = asList(
                UserMessage.from("Tell me about Klaus Heisler"),
                AiMessage.from("He is a cool guy")
        );
        UserMessage followUp = UserMessage.from("How old is he?");

        Metadata queryMetadata = Metadata.from(followUp, "default", history);
        Query originalQuery = Query.from(followUp.text(), queryMetadata);

        CompressingQueryTransformer queryTransformer = new CompressingQueryTransformer(model);

        // when
        Collection<Query> transformed = queryTransformer.transform(originalQuery);
        System.out.println(transformed);

        // then: exactly one standalone query that resolves the pronoun to "Klaus"
        assertThat(transformed).hasSize(1);

        Query compressed = transformed.iterator().next();
        assertThat(compressed.text()).contains("Klaus");
        assertThat(compressed.text()).doesNotContain(":");
    }

    // Supplies the chat models to run the parameterized test against.
    static Stream<Arguments> models() {
        return Stream.of(
                Arguments.of(
                        OpenAiChatModel.builder()
                                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                                .apiKey(System.getenv("OPENAI_API_KEY"))
                                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                                .logRequests(true)
                                .logResponses(true)
                                .build()
                )
                // TODO add more models
        );
    }
}
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1894, 2300), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1894, 2259), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1894, 2207), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1894, 2156), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1894, 2067), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1894, 1994), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package org.acme.examples.aiservices; import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson; import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson; import static dev.langchain4j.data.message.ChatMessageType.AI; import static dev.langchain4j.data.message.ChatMessageType.SYSTEM; import static dev.langchain4j.data.message.ChatMessageType.USER; import static io.quarkiverse.langchain4j.openai.test.WiremockUtils.DEFAULT_TOKEN; import static java.time.Month.JULY; import static org.acme.examples.aiservices.MessageAssertUtils.*; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.tuple; import java.io.IOException; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import jakarta.validation.constraints.NotNull; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.databind.ObjectMapper; import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.stubbing.Scenario; import com.github.tomakehurst.wiremock.stubbing.ServeEvent; import com.github.tomakehurst.wiremock.verification.LoggedRequest; import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.memory.chat.ChatMemoryProvider; import 
dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.input.structured.StructuredPrompt; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiModerationModel; import dev.langchain4j.model.output.structured.Description; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.MemoryId; import dev.langchain4j.service.Moderate; import dev.langchain4j.service.ModerationException; import dev.langchain4j.service.SystemMessage; import dev.langchain4j.service.UserMessage; import dev.langchain4j.service.V; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import io.opentelemetry.instrumentation.annotations.SpanAttribute; import io.quarkiverse.langchain4j.openai.test.WiremockUtils; import io.quarkus.test.QuarkusUnitTest; public class AiServicesTest { @RegisterExtension static final QuarkusUnitTest unitTest = new QuarkusUnitTest() .setArchiveProducer( () -> ShrinkWrap.create(JavaArchive.class).addClasses(WiremockUtils.class, MessageAssertUtils.class)); static WireMockServer wireMockServer; static ObjectMapper mapper; private static OpenAiChatModel createChatModel() { return OpenAiChatModel.builder().baseUrl("http://localhost:8089/v1") .logRequests(true) .logResponses(true) .apiKey("whatever").build(); } private static OpenAiModerationModel createModerationModel() { return OpenAiModerationModel.builder().baseUrl("http://localhost:8089/v1") .logRequests(true) .logResponses(true) .apiKey("whatever").build(); } private static MessageWindowChatMemory createChatMemory() { return MessageWindowChatMemory.withMaxMessages(10); } @BeforeAll static void beforeAll() { wireMockServer = new WireMockServer(options().port(8089)); wireMockServer.start(); mapper = new ObjectMapper(); } @AfterAll static void afterAll() { wireMockServer.stop(); } @BeforeEach void setup() { wireMockServer.resetAll(); wireMockServer.stubFor(WiremockUtils.defaultChatCompletionsStub()); } interface Assistant { String 
chat(String message); } @Test public void test_simple_instruction_with_single_argument_and_no_annotations() throws IOException { String result = AiServices.create(Assistant.class, createChatModel()).chat("Tell me a joke about developers"); assertThat(result).isNotBlank(); assertSingleRequestMessage(getRequestAsMap(), "Tell me a joke about developers"); } interface Humorist { @UserMessage("Tell me a joke about {{wrapper.topic}}") String joke(@SpanAttribute @NotNull Wrapper wrapper); } public record Wrapper(String topic) { } @Test public void test_simple_instruction_with_single_argument() throws IOException { String result = AiServices.create(Humorist.class, createChatModel()).joke(new Wrapper("programmers")); assertThat(result).isNotBlank(); assertSingleRequestMessage(getRequestAsMap(), "Tell me a joke about programmers"); } interface DateTimeExtractor { @UserMessage("Extract date from {{it}}") LocalDate extractDateFrom(String text); @UserMessage("Extract time from {{it}}") LocalTime extractTimeFrom(String text); @UserMessage("Extract date and time from {{it}}") LocalDateTime extractDateTimeFrom(String text); } @Test void test_extract_date() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "1968-07-04")); DateTimeExtractor dateTimeExtractor = AiServices.create(DateTimeExtractor.class, createChatModel()); LocalDate result = dateTimeExtractor.extractDateFrom( "The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day."); assertThat(result).isEqualTo(LocalDate.of(1968, JULY, 4)); assertSingleRequestMessage(getRequestAsMap(), "Extract date from The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day.\nYou must answer strictly in the following format: yyyy-MM-dd"); } @Test void test_extract_time() throws IOException { 
wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "23:45:00")); DateTimeExtractor dateTimeExtractor = AiServices.create(DateTimeExtractor.class, createChatModel()); LocalTime result = dateTimeExtractor.extractTimeFrom( "The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day."); assertThat(result).isEqualTo(LocalTime.of(23, 45, 0)); assertSingleRequestMessage(getRequestAsMap(), "Extract time from The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day.\nYou must answer strictly in the following format: HH:mm:ss"); } @Test void test_extract_date_time() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "1968-07-04T23:45:00")); DateTimeExtractor dateTimeExtractor = AiServices.create(DateTimeExtractor.class, createChatModel()); LocalDateTime result = dateTimeExtractor.extractDateTimeFrom( "The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day."); assertThat(result).isEqualTo(LocalDateTime.of(1968, JULY, 4, 23, 45, 0)); assertSingleRequestMessage(getRequestAsMap(), "Extract date and time from The tranquility pervaded the evening of 1968, just fifteen minutes shy of midnight, following the celebrations of Independence Day.\nYou must answer strictly in the following format: yyyy-MM-ddTHH:mm:ss"); } enum Sentiment { POSITIVE, NEUTRAL, NEGATIVE } interface SentimentAnalyzer { @UserMessage("Analyze sentiment of {{it}}") Sentiment analyzeSentimentOf(String text); } @Test void test_extract_enum() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "POSITIVE")); SentimentAnalyzer sentimentAnalyzer = AiServices.create(SentimentAnalyzer.class, createChatModel()); Sentiment sentiment = 
sentimentAnalyzer.analyzeSentimentOf( "This LaptopPro X15 is wicked fast and that 4K screen is a dream."); assertThat(sentiment).isEqualTo(Sentiment.POSITIVE); assertSingleRequestMessage(getRequestAsMap(), "Analyze sentiment of This LaptopPro X15 is wicked fast and that 4K screen is a dream.\nYou must answer strictly in the following format: one of [POSITIVE, NEUTRAL, NEGATIVE]"); } record Person(String firstName, String lastName, LocalDate birthDate) { @JsonCreator public Person { } } interface PersonExtractor { @UserMessage("Extract information about a person from {{it}}") Person extractPersonFrom(String text); } @Test void test_extract_custom_POJO() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), // this is supposed to be a string inside a json string hence all the escaping... "{\\n\\\"firstName\\\": \\\"John\\\",\\n\\\"lastName\\\": \\\"Doe\\\",\\n\\\"birthDate\\\": \\\"1968-07-04\\\"\\n}")); PersonExtractor personExtractor = AiServices.create(PersonExtractor.class, createChatModel()); String text = "In 1968, amidst the fading echoes of Independence Day, " + "a child named John arrived under the calm evening sky. " + "This newborn, bearing the surname Doe, marked the start of a new journey."; Person result = personExtractor.extractPersonFrom(text); assertThat(result.firstName).isEqualTo("John"); assertThat(result.lastName).isEqualTo("Doe"); assertThat(result.birthDate).isEqualTo(LocalDate.of(1968, JULY, 4)); assertSingleRequestMessage(getRequestAsMap(), "Extract information about a person from In 1968, amidst the fading echoes of Independence Day, a child named John arrived under the calm evening sky. 
This newborn, bearing the surname Doe, marked the start of a new journey.\nYou must answer strictly in the following JSON format: {\n\"firstName\": (type: string),\n\"lastName\": (type: string),\n\"birthDate\": (type: date string (2023-12-31)),\n}"); } static class Recipe { private String title; private String description; @Description("each step should be described in 4 words, steps should rhyme") private String[] steps; private Integer preparationTimeMinutes; } @StructuredPrompt("Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}") static class CreateRecipePrompt { private final String dish; private final List<String> ingredients; public CreateRecipePrompt(String dish, List<String> ingredients) { this.dish = dish; this.ingredients = ingredients; } public String getDish() { return dish; } public List<String> getIngredients() { return ingredients; } } interface Chef { @UserMessage("Create recipe using only {{it}}") Recipe createRecipeFrom(String... ingredients); Recipe createRecipeFrom(CreateRecipePrompt prompt); @SystemMessage("You are a very {{character}} chef") Recipe createRecipeFrom(@UserMessage CreateRecipePrompt prompt, String character); } @Test void test_create_recipe_from_list_of_ingredients() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), // this is supposed to be a string inside a json string hence all the escaping... 
"{\\n\\\"title\\\": \\\"Greek Salad\\\",\\n\\\"description\\\": \\\"A refreshing and tangy salad with Mediterranean flavors.\\\",\\n\\\"steps\\\": [\\n\\\"Chop, dice, and slice.\\\",\\n\\\"Mix veggies with feta.\\\",\\n\\\"Drizzle with olive oil.\\\",\\n\\\"Toss gently, then serve.\\\"\\n],\\n\\\"preparationTimeMinutes\\\": 15\\n}")); Chef chef = AiServices.create(Chef.class, createChatModel()); Recipe result = chef.createRecipeFrom("cucumber", "tomato", "feta", "onion", "olives"); assertThat(result.title).isNotBlank(); assertThat(result.description).isNotBlank(); assertThat(result.steps).isNotEmpty(); assertThat(result.preparationTimeMinutes).isPositive(); assertSingleRequestMessage(getRequestAsMap(), "Create recipe using only [cucumber, tomato, feta, onion, olives]\nYou must answer strictly in the following JSON format: {\n\"title\": (type: string),\n\"description\": (type: string),\n\"steps\": (each step should be described in 4 words, steps should rhyme; type: array of string),\n\"preparationTimeMinutes\": (type: integer),\n}"); } @Test void test_create_recipe_using_structured_prompt() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), // this is supposed to be a string inside a json string hence all the escaping... 
"{\\n\\\"title\\\": \\\"Greek Salad\\\",\\n\\\"description\\\": \\\"A refreshing and tangy salad with Mediterranean flavors.\\\",\\n\\\"steps\\\": [\\n\\\"Chop, dice, and slice.\\\",\\n\\\"Mix veggies with feta.\\\",\\n\\\"Drizzle with olive oil.\\\",\\n\\\"Toss gently, then serve.\\\"\\n],\\n\\\"preparationTimeMinutes\\\": 15\\n}")); Chef chef = AiServices.create(Chef.class, createChatModel()); Recipe result = chef .createRecipeFrom(new CreateRecipePrompt("salad", List.of("cucumber", "tomato", "feta", "onion", "olives"))); assertThat(result.title).isNotBlank(); assertThat(result.description).isNotBlank(); assertThat(result.steps).isNotEmpty(); assertThat(result.preparationTimeMinutes).isPositive(); assertSingleRequestMessage(getRequestAsMap(), "Create a recipe of a salad that can be prepared using only [cucumber, tomato, feta, onion, olives]\nYou must answer strictly in the following JSON format: {\n\"title\": (type: string),\n\"description\": (type: string),\n\"steps\": (each step should be described in 4 words, steps should rhyme; type: array of string),\n\"preparationTimeMinutes\": (type: integer),\n}"); } @Test void test_create_recipe_using_structured_prompt_and_system_message() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), // this is supposed to be a string inside a json string hence all the escaping... 
"{\\n\\\"title\\\": \\\"Greek Medley Salad\\\",\\n\\\"description\\\": \\\"A refreshing and tangy salad with a Mediterranean twist.\\\",\\n\\\"steps\\\": [\\n\\\"Slice and dice, precise!\\\",\\n\\\"Mix and toss, no loss!\\\",\\n\\\"Sprinkle feta, get betta!\\\",\\n\\\"Garnish with olives, no jives!\\\"\\n],\\n\\\"preparationTimeMinutes\\\": 15\\n}")); Chef chef = AiServices.create(Chef.class, createChatModel()); Recipe result = chef .createRecipeFrom(new CreateRecipePrompt("salad", List.of("cucumber", "tomato", "feta", "onion", "olives")), "funny"); assertThat(result.title).isEqualTo("Greek Medley Salad"); assertThat(result.description).isNotBlank(); assertThat(result.steps).hasSize(4).satisfies(strings -> { assertThat(strings[0]).contains("Slice and dice"); assertThat(strings[3]).contains("jives"); }); assertThat(result.preparationTimeMinutes).isEqualTo(15); assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", "You are a very funny chef"), new MessageContent("user", "Create a recipe of a salad that can be prepared using only [cucumber, tomato, feta, onion, olives]\nYou must answer strictly in the following JSON format: {\n\"title\": (type: string),\n\"description\": (type: string),\n\"steps\": (each step should be described in 4 words, steps should rhyme; type: array of string),\n\"preparationTimeMinutes\": (type: integer),\n}"))); } @SystemMessage("You are a professional chef. You are friendly, polite and concise.") interface ProfessionalChef { String answer(String question); @SystemMessage("You are an amateur.") String answer2(String question); } @Test void test_with_system_message_of_first_method() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Grilling chicken typically takes around 10-15 minutes per side, depending on the thickness of the chicken. 
It's important to ensure the internal temperature reaches 165°F (74°C) for safe consumption.")); ProfessionalChef chef = AiServices.create(ProfessionalChef.class, createChatModel()); String result = chef.answer("How long should I grill chicken?"); assertThat(result).contains("Grilling chicken typically"); assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", "You are a professional chef. You are friendly, polite and concise."), new MessageContent("user", "How long should I grill chicken?"))); } @Test void test_with_system_message_of_second_method() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Grilling chicken typically takes around 10-15 minutes per side, depending on the thickness of the chicken. It's important to ensure the internal temperature reaches 165°F (74°C) for safe consumption.")); ProfessionalChef chef = AiServices.create(ProfessionalChef.class, createChatModel()); String result = chef.answer2("How long should I grill chicken?"); assertThat(result).contains("Grilling chicken typically"); assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", "You are an amateur."), new MessageContent("user", "How long should I grill chicken?"))); } interface Translator { @SystemMessage("You are a professional translator into {{lang}}") @UserMessage("Translate the following text: {{text}}") String translate(@V("text") String text, @V("lang") String language); } @Test void test_with_system_and_user_messages() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Hallo, wie geht es dir?")); Translator translator = AiServices.create(Translator.class, createChatModel()); String translation = translator.translate("Hello, how are you?", "german"); assertThat(translation).isEqualTo("Hallo, wie geht es dir?"); assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", "You are 
a professional translator into german"), new MessageContent("user", "Translate the following text: Hello, how are you?"))); } interface Summarizer { @SystemMessage("Summarize every message from user in {{n}} bullet points. Provide only bullet points.") List<String> summarize(@UserMessage String text, @MemoryId int n); } @Test void test_with_system_message_and_user_message_as_argument() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "- AI is a branch of computer science\\n- AI aims to create machines that mimic human intelligence\\n- AI can perform tasks like recognizing patterns, making decisions, and predictions")); Summarizer summarizer = AiServices.create(Summarizer.class, createChatModel()); String text = "AI, or artificial intelligence, is a branch of computer science that aims to create " + "machines that mimic human intelligence. This can range from simple tasks such as recognizing " + "patterns or speech to more complex tasks like making decisions or predictions."; List<String> bulletPoints = summarizer.summarize(text, 3); assertThat(bulletPoints).hasSize(3).satisfies(list -> { assertThat(list.get(0)).contains("branch"); assertThat(list.get(2)).contains("predictions"); }); assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", "Summarize every message from user in 3 bullet points. 
Provide only bullet points."), new MessageContent("user", text + "\nYou must put every item on a separate line."))); } interface ChatWithModeration { @Moderate String chat(String message); } @Test void should_throw_when_text_is_flagged() { wireMockServer.stubFor(WiremockUtils.moderationMapping(DEFAULT_TOKEN) .willReturn(aResponse() .withHeader("Content-Type", "application/json") .withBody( """ { "id": "modr-8Bmx2bYNsgzuAsSuxaQRDCMKHgJbC", "model": "text-moderation-006", "results": [ { "flagged": true, "categories": { "sexual": false, "hate": true, "harassment": false, "self-harm": false, "sexual/minors": false, "hate/threatening": true, "violence/graphic": false, "self-harm/intent": false, "self-harm/instructions": false, "harassment/threatening": false, "violence": false }, "category_scores": { "sexual": 0.0001485530665377155, "hate": 0.00004570276360027492, "harassment": 0.00006113418203312904, "self-harm": 5.4490744361146426e-8, "sexual/minors": 6.557503979820467e-7, "hate/threatening": 7.536454127432535e-9, "violence/graphic": 2.776141343474592e-7, "self-harm/intent": 9.653235544249128e-9, "self-harm/instructions": 1.2119762970996817e-9, "harassment/threatening": 5.06949959344638e-7, "violence": 0.000026839805286726914 } } ] } """))); ChatWithModeration chatWithModeration = AiServices.builder(ChatWithModeration.class) .chatLanguageModel(createChatModel()) .moderationModel(createModerationModel()) .build(); assertThatThrownBy(() -> chatWithModeration.chat("I WILL KILL YOU!!!")) .isExactlyInstanceOf(ModerationException.class) .hasMessage("Text \"" + "I WILL KILL YOU!!!" 
+ "\" violates content policy"); } @Test void should_not_throw_when_text_is_not_flagged() { wireMockServer.stubFor(WiremockUtils.moderationMapping(DEFAULT_TOKEN) .willReturn(aResponse() .withHeader("Content-Type", "application/json") .withBody( """ { "id": "modr-8Bmx2bYNsgzuAsSuxaQRDCMKHgJbC", "model": "text-moderation-006", "results": [ { "flagged": false, "categories": { "sexual": false, "hate": true, "harassment": false, "self-harm": false, "sexual/minors": false, "hate/threatening": false, "violence/graphic": false, "self-harm/intent": false, "self-harm/instructions": false, "harassment/threatening": false, "violence": false }, "category_scores": { "sexual": 0.0001485530665377155, "hate": 0.00004570276360027492, "harassment": 0.00006113418203312904, "self-harm": 5.4490744361146426e-8, "sexual/minors": 6.557503979820467e-7, "hate/threatening": 7.536454127432535e-9, "violence/graphic": 2.776141343474592e-7, "self-harm/intent": 9.653235544249128e-9, "self-harm/instructions": 1.2119762970996817e-9, "harassment/threatening": 5.06949959344638e-7, "violence": 0.000026839805286726914 } } ] } """))); ChatWithModeration chatWithModeration = AiServices.builder(ChatWithModeration.class) .chatLanguageModel(createChatModel()) .moderationModel(createModerationModel()) .build(); String result = chatWithModeration.chat("I will hug you"); assertThat(result).isNotBlank(); } interface ChatWithMemory { String chatWithoutSystemMessage(String userMessage); @SystemMessage("You are helpful assistant") String chatWithSystemMessage(String userMessage); @SystemMessage("You are funny assistant") String chatWithAnotherSystemMessage(String userMessage); } @Test void should_keep_chat_memory() throws IOException { MessageWindowChatMemory chatMemory = createChatMemory(); ChatWithMemory chatWithMemory = AiServices.builder(ChatWithMemory.class) .chatLanguageModel(createChatModel()) .chatMemory(chatMemory) .build(); /* **** First request **** */ String firstUserMessage = "Hello, my name is Klaus"; 
wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String firstAiMessage = chatWithMemory.chatWithoutSystemMessage(firstUserMessage); // assert response assertThat(firstAiMessage).isEqualTo("Nice to meet you Klaus"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstUserMessage); // assert chat memory assertThat(chatMemory.messages()).hasSize(2) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstUserMessage), tuple(AI, firstAiMessage)); /* **** Second request **** */ wireMockServer.resetRequests(); String secondUserMessage = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Klaus")); String secondAiMessage = chatWithMemory.chatWithoutSystemMessage(secondUserMessage); // assert response assertThat(secondAiMessage).contains("Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("user", firstUserMessage), new MessageContent("assistant", firstAiMessage), new MessageContent("user", secondUserMessage))); // assert chat memory assertThat(chatMemory.messages()).hasSize(4) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstUserMessage), tuple(AI, firstAiMessage), tuple(USER, secondUserMessage), tuple(AI, secondAiMessage)); } @Test void should_keep_chat_memory_and_not_duplicate_system_message() throws IOException { MessageWindowChatMemory chatMemory = createChatMemory(); ChatWithMemory chatWithMemory = AiServices.builder(ChatWithMemory.class) .chatLanguageModel(createChatModel()) .chatMemory(chatMemory) .build(); /* **** First request **** */ String systemMessage = "You are helpful assistant"; String firstUserMessage = "Hello, my name is Klaus"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String firstAiMessage = 
chatWithMemory.chatWithSystemMessage(firstUserMessage); // assert response assertThat(firstAiMessage).isEqualTo("Nice to meet you Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", systemMessage), new MessageContent("user", firstUserMessage))); // assert chat memory assertThat(chatMemory.messages()).hasSize(3) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(SYSTEM, systemMessage), tuple(USER, firstUserMessage), tuple(AI, firstAiMessage)); /* **** Second request **** */ wireMockServer.resetRequests(); String secondUserMessage = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Klaus")); String secondAiMessage = chatWithMemory.chatWithSystemMessage(secondUserMessage); // assert response assertThat(secondAiMessage).contains("Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", systemMessage), new MessageContent("user", firstUserMessage), new MessageContent("assistant", firstAiMessage), new MessageContent("user", secondUserMessage))); // assert chat memory assertThat(chatMemory.messages()).hasSize(5) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(SYSTEM, systemMessage), tuple(USER, firstUserMessage), tuple(AI, firstAiMessage), tuple(USER, secondUserMessage), tuple(AI, secondAiMessage)); } @Test void should_keep_chat_memory_and_add_new_system_message() throws IOException { MessageWindowChatMemory chatMemory = createChatMemory(); ChatWithMemory chatWithMemory = AiServices.builder(ChatWithMemory.class) .chatLanguageModel(createChatModel()) .chatMemory(chatMemory) .build(); /* **** First request **** */ String firstSystemMessage = "You are helpful assistant"; String firstUserMessage = "Hello, my name is Klaus"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String 
firstAiMessage = chatWithMemory.chatWithSystemMessage(firstUserMessage); // assert response assertThat(firstAiMessage).isEqualTo("Nice to meet you Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("system", firstSystemMessage), new MessageContent("user", firstUserMessage))); // assert chat memory assertThat(chatMemory.messages()).hasSize(3) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(SYSTEM, firstSystemMessage), tuple(USER, firstUserMessage), tuple(AI, firstAiMessage)); /* **** Second request **** */ wireMockServer.resetRequests(); String secondSystemMessage = "You are funny assistant"; String secondUserMessage = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Klaus")); String secondAiMessage = chatWithMemory.chatWithAnotherSystemMessage(secondUserMessage); // assert response assertThat(secondAiMessage).contains("Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("user", firstUserMessage), new MessageContent("assistant", firstAiMessage), new MessageContent("system", secondSystemMessage), new MessageContent("user", secondUserMessage))); // assert chat memory assertThat(chatMemory.messages()).hasSize(5) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstUserMessage), tuple(AI, firstAiMessage), tuple(SYSTEM, secondSystemMessage), tuple(USER, secondUserMessage), tuple(AI, secondAiMessage)); } interface ChatWithSeparateMemoryForEachUser { String chat(@MemoryId int memoryId, @UserMessage String userMessage); } @Test void should_keep_separate_chat_memory_for_each_user_in_store() throws IOException { // emulating persistent storage Map</* memoryId */ Object, String> persistentStorage = new HashMap<>(); ChatMemoryStore store = new ChatMemoryStore() { @Override public List<ChatMessage> getMessages(Object memoryId) { return 
messagesFromJson(persistentStorage.get(memoryId)); } @Override public void updateMessages(Object memoryId, List<ChatMessage> messages) { persistentStorage.put(memoryId, messagesToJson(messages)); } @Override public void deleteMessages(Object memoryId) { persistentStorage.remove(memoryId); } }; ChatMemoryProvider chatMemoryProvider = memoryId -> MessageWindowChatMemory.builder() .id(memoryId) .maxMessages(10) .chatMemoryStore(store) .build(); int firstMemoryId = 1; int secondMemoryId = 2; ChatWithSeparateMemoryForEachUser chatWithMemory = AiServices.builder(ChatWithSeparateMemoryForEachUser.class) .chatLanguageModel(createChatModel()) .chatMemoryProvider(chatMemoryProvider) .build(); /* **** First request for user 1 **** */ String firstMessageFromFirstUser = "Hello, my name is Klaus"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String firstAiResponseToFirstUser = chatWithMemory.chat(firstMemoryId, firstMessageFromFirstUser); // assert response assertThat(firstAiResponseToFirstUser).isEqualTo("Nice to meet you Klaus"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstMessageFromFirstUser); // assert chat memory assertThat(store.getMessages(firstMemoryId)).hasSize(2) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromFirstUser), tuple(AI, firstAiResponseToFirstUser)); /* **** First request for user 2 **** */ wireMockServer.resetRequests(); String firstMessageFromSecondUser = "Hello, my name is Francine"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Francine")); String firstAiResponseToSecondUser = chatWithMemory.chat(secondMemoryId, firstMessageFromSecondUser); // assert response assertThat(firstAiResponseToSecondUser).isEqualTo("Nice to meet you Francine"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstMessageFromSecondUser); // assert chat memory 
assertThat(store.getMessages(secondMemoryId)).hasSize(2) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromSecondUser), tuple(AI, firstAiResponseToSecondUser)); /* **** Second request for user 1 **** */ wireMockServer.resetRequests(); String secondsMessageFromFirstUser = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Klaus")); String secondAiMessageToFirstUser = chatWithMemory.chat(firstMemoryId, secondsMessageFromFirstUser); // assert response assertThat(secondAiMessageToFirstUser).contains("Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("user", firstMessageFromFirstUser), new MessageContent("assistant", firstAiResponseToFirstUser), new MessageContent("user", secondsMessageFromFirstUser))); // assert chat memory assertThat(store.getMessages(firstMemoryId)).hasSize(4) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromFirstUser), tuple(AI, firstAiResponseToFirstUser), tuple(USER, secondsMessageFromFirstUser), tuple(AI, secondAiMessageToFirstUser)); /* **** Second request for user 2 **** */ wireMockServer.resetRequests(); String secondsMessageFromSecondUser = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Francine")); String secondAiMessageToSecondUser = chatWithMemory.chat(secondMemoryId, secondsMessageFromSecondUser); // assert response assertThat(secondAiMessageToSecondUser).contains("Francine"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageContent("user", firstMessageFromSecondUser), new MessageContent("assistant", firstAiResponseToSecondUser), new MessageContent("user", secondsMessageFromSecondUser))); // assert chat memory assertThat(store.getMessages(secondMemoryId)).hasSize(4) .extracting(ChatMessage::type, ChatMessage::text) 
.containsExactly(tuple(USER, firstMessageFromSecondUser), tuple(AI, firstAiResponseToSecondUser), tuple(USER, secondsMessageFromSecondUser), tuple(AI, secondAiMessageToSecondUser)); } static class Calculator { private final Runnable after; Calculator(Runnable after) { this.after = after; } @Tool("calculates the square root of the provided number") double squareRoot(double number) { var result = Math.sqrt(number); after.run(); return result; } } @Test void should_execute_tool_then_answer() throws IOException { var firstResponse = """ { "id": "chatcmpl-8D88Dag1gAKnOPP9Ed4bos7vSpaNz", "object": "chat.completion", "created": 1698140213, "model": "gpt-3.5-turbo-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": null, "function_call": { "name": "squareRoot", "arguments": "{\\n \\"number\\": 485906798473894056\\n}" } }, "finish_reason": "function_call" } ], "usage": { "prompt_tokens": 65, "completion_tokens": 20, "total_tokens": 85 } } """; var secondResponse = """ { "id": "chatcmpl-8D88FIAUWSpwLaShFr0w8G1SWuVdl", "object": "chat.completion", "created": 1698140215, "model": "gpt-3.5-turbo-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": "The square root of 485,906,798,473,894,056 in scientific notation is approximately 6.97070153193991E8." 
}, "finish_reason": "stop" } ], "usage": { "prompt_tokens": 102, "completion_tokens": 33, "total_tokens": 135 } } """; String scenario = "tools"; String secondState = "second"; wireMockServer.stubFor( WiremockUtils.chatCompletionMapping(DEFAULT_TOKEN) .inScenario(scenario) .whenScenarioStateIs(Scenario.STARTED) .willReturn(WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody(firstResponse))); wireMockServer.stubFor( WiremockUtils.chatCompletionMapping(DEFAULT_TOKEN) .inScenario(scenario) .whenScenarioStateIs(secondState) .willReturn(WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody(secondResponse))); wireMockServer.setScenarioState(scenario, Scenario.STARTED); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(createChatModel()) .chatMemory(createChatMemory()) .tools(new Calculator(() -> wireMockServer.setScenarioState(scenario, secondState))) .build(); String userMessage = "What is the square root of 485906798473894056 in scientific notation?"; String answer = assistant.chat(userMessage); assertThat(answer).isEqualTo( "The square root of 485,906,798,473,894,056 in scientific notation is approximately 6.97070153193991E8."); assertThat(wireMockServer.getAllServeEvents()).hasSize(2); assertSingleRequestMessage(getRequestAsMap(getRequestBody(wireMockServer.getAllServeEvents().get(1))), "What is the square root of 485906798473894056 in scientific notation?"); assertMultipleRequestMessage(getRequestAsMap(getRequestBody(wireMockServer.getAllServeEvents().get(0))), List.of( new MessageContent("user", "What is the square root of 485906798473894056 in scientific notation?"), new MessageContent("assistant", null), new MessageContent("function", "6.97070153193991E8"))); } private Map<String, Object> getRequestAsMap() throws IOException { return getRequestAsMap(getRequestBody()); } private Map<String, Object> getRequestAsMap(byte[] body) throws IOException { return mapper.readValue(body, MAP_TYPE_REF); } private byte[] getRequestBody() { 
assertThat(wireMockServer.getAllServeEvents()).hasSize(1); ServeEvent serveEvent = wireMockServer.getAllServeEvents().get(0); // this works because we reset requests for Wiremock before each test return getRequestBody(serveEvent); } private byte[] getRequestBody(ServeEvent serveEvent) { LoggedRequest request = serveEvent.getRequest(); assertThat(request.getBody()).isNotEmpty(); return request.getBody(); } }
[ "dev.langchain4j.service.AiServices.create", "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiModerationModel.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((2980, 3074), 'org.jboss.shrinkwrap.api.ShrinkWrap.create'), ((3224, 3400), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3224, 3392), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3224, 3356), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3224, 3320), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3224, 3285), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3491, 3673), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((3491, 3665), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((3491, 3629), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((3491, 3593), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((3491, 3558), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((4444, 4537), 'dev.langchain4j.service.AiServices.create'), ((5003, 5088), 'dev.langchain4j.service.AiServices.create'), ((22023, 25036), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.moderationMapping'), ((25087, 25268), 'dev.langchain4j.service.AiServices.builder'), ((25087, 25243), 'dev.langchain4j.service.AiServices.builder'), ((25087, 25185), 'dev.langchain4j.service.AiServices.builder'), ((25612, 28627), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.moderationMapping'), ((28678, 28859), 'dev.langchain4j.service.AiServices.builder'), ((28678, 28834), 'dev.langchain4j.service.AiServices.builder'), ((28678, 28776), 'dev.langchain4j.service.AiServices.builder'), ((29475, 29634), 'dev.langchain4j.service.AiServices.builder'), ((29475, 29609), 'dev.langchain4j.service.AiServices.builder'), ((29475, 29569), 'dev.langchain4j.service.AiServices.builder'), ((31712, 31871), 'dev.langchain4j.service.AiServices.builder'), ((31712, 31846), 'dev.langchain4j.service.AiServices.builder'), ((31712, 31806), 'dev.langchain4j.service.AiServices.builder'), ((34302, 34461), 'dev.langchain4j.service.AiServices.builder'), ((34302, 34436), 
'dev.langchain4j.service.AiServices.builder'), ((34302, 34396), 'dev.langchain4j.service.AiServices.builder'), ((37776, 37937), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((37776, 37912), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((37776, 37872), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((37776, 37839), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((38063, 38257), 'dev.langchain4j.service.AiServices.builder'), ((38063, 38232), 'dev.langchain4j.service.AiServices.builder'), ((38063, 38176), 'dev.langchain4j.service.AiServices.builder'), ((45144, 45405), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45144, 45303), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45144, 45240), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45340, 45404), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody'), ((45456, 45713), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45456, 45610), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45456, 45552), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((45647, 45712), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody'), ((45817, 46080), 'dev.langchain4j.service.AiServices.builder'), ((45817, 46055), 'dev.langchain4j.service.AiServices.builder'), ((45817, 45954), 'dev.langchain4j.service.AiServices.builder'), ((45817, 45906), 'dev.langchain4j.service.AiServices.builder')]
package dev.langchain4j.agent.tool.graalvm; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.*; class GraalVmJavaScriptExecutionToolIT { OpenAiChatModel model = OpenAiChatModel.builder() .baseUrl(System.getenv("OPENAI_BASE_URL")) .apiKey(System.getenv("OPENAI_API_KEY")) .organizationId(System.getenv("OPENAI_ORGANIZATION_ID")) .build(); interface Assistant { String chat(String userMessage); } @Test public void should_execute_tool() { GraalVmJavaScriptExecutionTool tool = spy(new GraalVmJavaScriptExecutionTool()); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(model) .tools(tool) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .build(); String answer = assistant.chat("What is the square root of 485906798473894056 in scientific notation?"); assertThat(answer).contains("6.97"); verify(tool).executeJavaScriptCode(contains("485906798473894056")); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((403, 626), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((403, 605), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((403, 536), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((403, 483), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((875, 1079), 'dev.langchain4j.service.AiServices.builder'), ((875, 1054), 'dev.langchain4j.service.AiServices.builder'), ((875, 981), 'dev.langchain4j.service.AiServices.builder'), ((875, 952), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.code.Judge0JavaScriptExecutionTool; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import static java.time.Duration.ofSeconds; public class ServiceWithDynamicToolsExample { interface Assistant { String chat(String message); } public static void main(String[] args) { Judge0JavaScriptExecutionTool judge0Tool = new Judge0JavaScriptExecutionTool(ApiKeys.RAPID_API_KEY); ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .temperature(0.0) .timeout(ofSeconds(60)) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .tools(judge0Tool) .build(); interact(assistant, "What is the square root of 49506838032859?"); interact(assistant, "Capitalize every third letter: abcabc"); interact(assistant, "What is the number of hours between 17:00 on 21 Feb 1988 and 04:00 on 12 Apr 2014?"); } private static void interact(Assistant assistant, String userMessage) { System.out.println("[User]: " + userMessage); String answer = assistant.chat(userMessage); System.out.println("[Assistant]: " + answer); System.out.println(); System.out.println(); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((634, 806), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 781), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 741), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 707), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((839, 1061), 'dev.langchain4j.service.AiServices.builder'), ((839, 1036), 'dev.langchain4j.service.AiServices.builder'), ((839, 1001), 'dev.langchain4j.service.AiServices.builder'), ((839, 928), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.MemoryId; import dev.langchain4j.service.UserMessage; public class ServiceWithMemoryForEachUserExample { interface Assistant { String chat(@MemoryId int memoryId, @UserMessage String userMessage); } public static void main(String[] args) { Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(10)) .build(); System.out.println(assistant.chat(1, "Hello, my name is Klaus")); // Hi Klaus! How can I assist you today? System.out.println(assistant.chat(2, "Hello, my name is Francine")); // Hello Francine! How can I assist you today? System.out.println(assistant.chat(1, "What is my name?")); // Your name is Klaus. System.out.println(assistant.chat(2, "What is my name?")); // Your name is Francine. } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((482, 722), 'dev.langchain4j.service.AiServices.builder'), ((482, 697), 'dev.langchain4j.service.AiServices.builder'), ((482, 604), 'dev.langchain4j.service.AiServices.builder')]
import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import org.mapdb.DB; import org.mapdb.DBMaker; import java.util.List; import java.util.Map; import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson; import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson; import static org.mapdb.Serializer.STRING; public class ServiceWithPersistentMemoryExample { interface Assistant { String chat(String message); } public static void main(String[] args) { ChatMemory chatMemory = MessageWindowChatMemory.builder() .maxMessages(10) .chatMemoryStore(new PersistentChatMemoryStore()) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .chatMemory(chatMemory) .build(); String answer = assistant.chat("Hello! My name is Klaus."); System.out.println(answer); // Hello Klaus! How can I assist you today? // Now, comment out the two lines above, uncomment the two lines below, and run again. // String answerWithName = assistant.chat("What is my name?"); // System.out.println(answerWithName); // Your name is Klaus. 
} // You can create your own implementation of ChatMemoryStore and store chat memory whenever you'd like static class PersistentChatMemoryStore implements ChatMemoryStore { private final DB db = DBMaker.fileDB("chat-memory.db").transactionEnable().make(); private final Map<String, String> map = db.hashMap("messages", STRING, STRING).createOrOpen(); @Override public List<ChatMessage> getMessages(Object memoryId) { String json = map.get((String) memoryId); return messagesFromJson(json); } @Override public void updateMessages(Object memoryId, List<ChatMessage> messages) { String json = messagesToJson(messages); map.put((String) memoryId, json); db.commit(); } @Override public void deleteMessages(Object memoryId) { map.remove((String) memoryId); db.commit(); } } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((809, 966), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 941), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 875), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((999, 1186), 'dev.langchain4j.service.AiServices.builder'), ((999, 1161), 'dev.langchain4j.service.AiServices.builder'), ((999, 1121), 'dev.langchain4j.service.AiServices.builder'), ((1792, 1851), 'org.mapdb.DBMaker.fileDB'), ((1792, 1844), 'org.mapdb.DBMaker.fileDB')]
import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.structured.StructuredPrompt; import dev.langchain4j.model.input.structured.StructuredPromptProcessor; import dev.langchain4j.model.openai.OpenAiChatModel; import java.util.List; import static java.time.Duration.ofSeconds; import static java.util.Arrays.asList; public class StructuredPromptTemplateExamples { static ChatLanguageModel model = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); static class Simple_Structured_Prompt_Example { @StructuredPrompt("Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}") static class CreateRecipePrompt { private String dish; private List<String> ingredients; } public static void main(String[] args) { CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt(); createRecipePrompt.dish = "salad"; createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives"); Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt); AiMessage aiMessage = model.generate(prompt.toUserMessage()).content(); System.out.println(aiMessage.text()); } } static class Multi_Line_Structured_Prompt_Example { @StructuredPrompt({ "Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}.", "Structure your answer in the following way:", "Recipe name: ...", "Description: ...", "Preparation time: ...", "Required ingredients:", "- ...", "- ...", "Instructions:", "- ...", "- ..." 
}) static class CreateRecipePrompt { private String dish; private List<String> ingredients; } public static void main(String[] args) { CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt(); createRecipePrompt.dish = "salad"; createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives"); Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt); AiMessage aiMessage = model.generate(prompt.toUserMessage()).content(); System.out.println(aiMessage.text()); } } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((528, 654), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 633), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 597), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import dev.langchain4j.model.openai.OpenAiChatModel; import java.net.InetSocketAddress; import java.net.Proxy; import static java.net.Proxy.Type.HTTP; public class ProxyExample { public static void main(String[] args) { OpenAiChatModel model = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .proxy(new Proxy(HTTP, new InetSocketAddress("39.175.77.7", 30001))) .build(); String answer = model.generate("hello"); System.out.println(answer); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((261, 444), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 419), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 334), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import dev.langchain4j.chain.ConversationalChain; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.TokenWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import java.io.IOException; import static dev.langchain4j.data.message.UserMessage.userMessage; public class ChatMemoryExamples { // See also ServiceWithMemoryExample and ServiceWithMemoryForEachUserExample public static class ConversationalChain_Example { public static void main(String[] args) throws IOException { ConversationalChain chain = ConversationalChain.builder() .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) // .chatMemory() // you can override default chat memory .build(); String answer = chain.execute("Hello, my name is Klaus"); System.out.println(answer); // Hello Klaus! How can I assist you today? String answerWithName = chain.execute("What is my name?"); System.out.println(answerWithName); // Your name is Klaus. } } public static class If_You_Need_More_Control { public static void main(String[] args) { ChatLanguageModel model = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY); ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(300, new OpenAiTokenizer("gpt-3.5-turbo")); // You have full control over the chat memory. // You can decide if you want to add a particular message to the memory // (e.g. you might not want to store few-shot examples to save on tokens). // You can process/modify the message before saving if required. chatMemory.add(userMessage("Hello, my name is Klaus")); AiMessage answer = model.generate(chatMemory.messages()).content(); System.out.println(answer.text()); // Hello Klaus! How can I assist you today? 
chatMemory.add(answer); chatMemory.add(userMessage("What is my name?")); AiMessage answerWithName = model.generate(chatMemory.messages()).content(); System.out.println(answerWithName.text()); // Your name is Klaus. chatMemory.add(answerWithName); } } }
[ "dev.langchain4j.chain.ConversationalChain.builder" ]
[((736, 962), 'dev.langchain4j.chain.ConversationalChain.builder'), ((736, 856), 'dev.langchain4j.chain.ConversationalChain.builder')]
package com.google.cloud.devrel.docchat; import com.vladsch.flexmark.ext.gfm.strikethrough.StrikethroughExtension; import com.vladsch.flexmark.ext.tables.TablesExtension; import com.vladsch.flexmark.html.HtmlRenderer; import com.vladsch.flexmark.parser.Parser; import com.vladsch.flexmark.util.ast.Node; import com.vladsch.flexmark.util.data.MutableDataSet; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.vertexai.VertexAiEmbeddingModel; import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import dev.langchain4j.store.memory.chat.InMemoryChatMemoryStore; import io.micronaut.core.io.IOUtils; import io.micronaut.core.io.Readable; import io.micronaut.core.io.ResourceResolver; import jakarta.inject.Singleton; import java.io.BufferedReader; import java.io.IOException; import java.net.URL; import java.util.Arrays; import java.util.Optional; @Singleton public class LLMQueryService { private final PromptTemplate promptTemplate; private final VertexAiEmbeddingModel embeddingModel; private final VertexAiGeminiChatModel geminiChatModel; private final InMemoryEmbeddingStore<TextSegment> embeddingStore; private final InMemoryChatMemoryStore chatMemoryStore; private final EmbeddingStoreRetriever retriever; public LLMQueryService() { this.embeddingModel = VertexAiEmbeddingModel.builder() .endpoint("us-central1-aiplatform.googleapis.com:443") .project("genai-java-demos") .location("us-central1") .publisher("google") .modelName("textembedding-gecko@latest") .maxRetries(3) .build(); this.geminiChatModel = VertexAiGeminiChatModel.builder() .project("genai-java-demos") .location("us-central1") .modelName("gemini-pro") .temperature(0.3f) .maxRetries(3) 
.build(); Optional<URL> resource = new ResourceResolver().getResource("classpath:saved-embedding-store.json"); Readable savedEmbeddingStore = Readable.of(resource.get()); String storeJson = "{}"; try { storeJson = IOUtils.readText(new BufferedReader(savedEmbeddingStore.asReader())); System.out.println("Read " + storeJson.length() + " bytes of saved embeddings"); } catch (IOException ioe) { System.err.println("Impossible to read saved embeddings"); ioe.printStackTrace(); } this.embeddingStore = InMemoryEmbeddingStore.fromJson(storeJson); System.out.println("In-memory embedding store loaded"); this.retriever = EmbeddingStoreRetriever.from(embeddingStore, embeddingModel); this.promptTemplate = PromptTemplate.from(""" You are an expert in the Apache Groovy programming language. You are also knowledgeable in the Java language, but be sure to write idiomatic Groovy code in your answers. You excel at teaching and explaining concepts of the language. If you don't know the answer to the question, say that you don't know the answer, and that the user should refer to the Groovy documentation. 
Answer the following question to the best of your ability: Using the Groovy language, {{question}} Base your answer exclusively on the following information from the Groovy documentation: {{information}} """); this.chatMemoryStore = new InMemoryChatMemoryStore(); } public String executeWithMemory(String query, String chatId) { MessageWindowChatMemory chatMemory = MessageWindowChatMemory.builder() .chatMemoryStore(chatMemoryStore) .id(chatId) .maxMessages(11) .build(); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(geminiChatModel) .chatMemory(chatMemory) .promptTemplate(promptTemplate) .retriever(retriever) .build(); System.out.println("query = " + query); String response = chain.execute(query); System.out.println("response = " + response); return renderMarkdownToHtml(response); } private static String renderMarkdownToHtml(String markdown) { MutableDataSet options = new MutableDataSet(); options.set(Parser.EXTENSIONS, Arrays.asList(TablesExtension.create(), StrikethroughExtension.create())); Parser parser = Parser.builder(options).build(); HtmlRenderer renderer = HtmlRenderer.builder(options).build(); Node document = parser.parse(markdown); return renderer.render(document); } }
[ "dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder", "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((1644, 1955), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1934), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1907), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1854), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1821), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1784), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1644, 1743), 'dev.langchain4j.model.vertexai.VertexAiEmbeddingModel.builder'), ((1989, 2216), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1989, 2195), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1989, 2168), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1989, 2137), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1989, 2100), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1989, 2063), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4003, 4156), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((4003, 4135), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((4003, 4106), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((4003, 4082), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((4204, 4425), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((4204, 4404), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((4204, 4370), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((4204, 4326), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((4204, 4290), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((4893, 4924), 'com.vladsch.flexmark.parser.Parser.builder'), ((4958, 4995), 'com.vladsch.flexmark.html.HtmlRenderer.builder')]
package org.open4goods.ui.config; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.open4goods.dao.ProductRepository; import org.open4goods.helper.DevModeService; import org.open4goods.model.constants.CacheConstants; import org.open4goods.model.constants.Currency; import org.open4goods.model.data.Price; import org.open4goods.model.product.Product; import org.open4goods.services.BarcodeValidationService; import org.open4goods.services.BrandService; import org.open4goods.services.DataSourceConfigService; import org.open4goods.services.EvaluationService; import org.open4goods.services.FeedbackService; import org.open4goods.services.GoogleTaxonomyService; import org.open4goods.services.ImageMagickService; import org.open4goods.services.MailService; import org.open4goods.services.RecaptchaService; import org.open4goods.services.RemoteFileCachingService; import org.open4goods.services.ResourceService; import org.open4goods.services.SearchService; import org.open4goods.services.SerialisationService; import org.open4goods.services.StandardiserService; import org.open4goods.services.VerticalsConfigService; import org.open4goods.services.XwikiService; import org.open4goods.services.ai.AiAgent; import org.open4goods.services.ai.AiService; import org.open4goods.ui.config.yml.UiConfig; import org.open4goods.ui.services.BlogService; import org.open4goods.ui.services.GtinService; import org.open4goods.ui.services.ImageService; import org.open4goods.ui.services.OpenDataService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.cache.CacheManager; import org.springframework.cache.caffeine.CaffeineCache; import org.springframework.cache.support.SimpleCacheManager; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.io.support.ResourcePatternResolver; import 
org.springframework.data.redis.connection.RedisConnectionFactory; import org.springframework.data.redis.core.RedisTemplate; import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer; import org.springframework.data.redis.serializer.StringRedisSerializer; import org.springframework.mail.javamail.JavaMailSender; import org.springframework.security.authentication.AuthenticationProvider; import org.springframework.security.web.firewall.HttpFirewall; import org.springframework.security.web.firewall.StrictHttpFirewall; import org.springframework.web.servlet.LocaleResolver; import org.springframework.web.servlet.i18n.AcceptHeaderLocaleResolver; import org.springframework.web.servlet.i18n.LocaleChangeInterceptor; import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.Ticker; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.service.AiServices; @Configuration public class AppConfig { // TODO : Cache period pageNumber conf public static final int CACHE_PERIOD_SECONDS = 3600*24*7; private final UiConfig config; public AppConfig(UiConfig config) { this.config = config; } @Bean BarcodeValidationService barcodeValidationService () { return new BarcodeValidationService(); } @Bean @Autowired public DevModeService devModeService (ProductRepository repository, SerialisationService serialisationService, VerticalsConfigService verticalsConfigService) { return new DevModeService(config.getDevModeConfig(),repository, serialisationService, verticalsConfigService); } @Bean public RedisTemplate<String, Product> redisTemplate(RedisConnectionFactory connectionFactory) { RedisTemplate<String, Product> template = new RedisTemplate<>(); template.setConnectionFactory(connectionFactory); // Configure serialization template.setKeySerializer(new StringRedisSerializer()); template.setValueSerializer(new GenericJackson2JsonRedisSerializer()); // Add some specific configuration here. Key serializers, etc. 
return template; } @Bean public BlogService blogService(@Autowired XwikiService xwikiService, @Autowired UiConfig config) { return new BlogService(xwikiService, config.getBlogConfig(), config.getNamings().getBaseUrls()); } @Bean FeedbackService feedbackService(@Autowired UiConfig config) { return new FeedbackService(config.getFeedbackConfig()); } @Bean ImageService imageService(@Autowired ImageMagickService imageMagickService, @Autowired ResourceService resourceService) { return new ImageService(imageMagickService, resourceService); } @Bean @Autowired AiService aiService (AiAgent nudgerAgent, VerticalsConfigService verticalService, EvaluationService spelEvaluationService) { return new AiService(nudgerAgent, verticalService, spelEvaluationService); } @Bean AiAgent nudgerAgent(@Autowired ChatLanguageModel chatLanguageModel) { return AiServices.builder(AiAgent.class) .chatLanguageModel(chatLanguageModel) // .retriever(retriever) .build(); } @Bean BrandService brandService(@Autowired RemoteFileCachingService rfc, @Autowired UiConfig properties) { return new BrandService(properties.getBrandConfig(), rfc); } @Bean OpenDataService openDataService(@Autowired ProductRepository aggregatedDataRepository, @Autowired UiConfig props) { return new OpenDataService(aggregatedDataRepository, props); } // TODO(note) : DISABLING SITE MAP GENERATION // @Bean // public SitemapGenerationService sitemapGenerationService (@Autowired ProductRepository aggregatedDataRepository, @Autowired UiConfig props ) { // return new SitemapGenerationService(aggregatedDataRepository, props); // } // @Bean AuthenticationProvider capsuleAuthenticationProvider(@Autowired XwikiService xwikiService) { return new XwikiAuthenticationProvider(xwikiService); } @Bean XwikiService xwikiService(@Autowired UiConfig props) { return new XwikiService(props.getWikiConfig()); } /** The bean providing datasource configurations **/ @Bean DataSourceConfigService datasourceConfigService(@Autowired final UiConfig config) { 
return new DataSourceConfigService(config.getDatasourcesfolder()); } @Bean RecaptchaService recaptchaService() { return new RecaptchaService(); } @Bean MailService mailService(@Autowired final JavaMailSender sender) { return new MailService(sender); } @Bean ImageMagickService imageMagickService() { return new ImageMagickService(); } @Bean ResourceService resourceService() { return new ResourceService(config.getRemoteCachingFolder()); } @Bean GtinService gtinService(@Autowired ResourceService resourceService) { return new GtinService(resourceService); } @Bean ProductRepository aggregatedDataRepo() { return new ProductRepository(); } @Bean SearchService searchService(@Autowired ProductRepository aggregatedDataRepository, @Autowired final UiConfig uiconfig) { return new SearchService(aggregatedDataRepository, uiconfig.logsFolder()); } @Bean StandardiserService standardiserService() { return new StandardiserService() { @Override public void standarise(final Price price, final Currency currency) { } }; } @Bean EvaluationService evaluationService() { return new EvaluationService(); } @Bean @Autowired VerticalsConfigService verticalConfigsService(ResourcePatternResolver resourceResolver, SerialisationService serialisationService, GoogleTaxonomyService googleTaxonomyService, ProductRepository productRepository) throws IOException { return new VerticalsConfigService( serialisationService,config.getVerticalsFolder(), googleTaxonomyService, productRepository, resourceResolver); } //////////////////////////////////// // Locale resolution //////////////////////////////////// @Bean LocaleResolver localeResolver() { return new AcceptHeaderLocaleResolver(); } static LocaleChangeInterceptor localeChangeInterceptor() { final LocaleChangeInterceptor lci = new LocaleChangeInterceptor(); lci.setParamName("lang"); return lci; } @Bean HttpFirewall allowUrlEncodedSlashHttpFirewall() { StrictHttpFirewall firewall = new StrictHttpFirewall(); firewall.setAllowUrlEncodedSlash(true); 
firewall.setAllowBackSlash(true); firewall.setAllowNull(true); firewall.setAllowSemicolon(true); firewall.setUnsafeAllowAnyHttpMethod(true); firewall.setUnsafeAllowAnyHttpMethod(true); firewall.setAllowUrlEncodedPercent(true); return firewall; } ////////////////////////////////////////////// // The uidMap managers ////////////////////////////////////////////// @Bean CacheManager cacheManager(@Autowired final Ticker ticker) { final CaffeineCache fCache = buildCache(CacheConstants.FOREVER_LOCAL_CACHE_NAME, ticker, 30000000); final CaffeineCache hCache = buildCache(CacheConstants.ONE_HOUR_LOCAL_CACHE_NAME, ticker, 60); final CaffeineCache mCache = buildCache(CacheConstants.ONE_MINUTE_LOCAL_CACHE_NAME, ticker, 1); final CaffeineCache dCache = buildCache(CacheConstants.ONE_DAY_LOCAL_CACHE_NAME, ticker, 60 * 24); final SimpleCacheManager manager = new SimpleCacheManager(); manager.setCaches(Arrays.asList(fCache, dCache, hCache,mCache)); return manager; } private CaffeineCache buildCache(final String name, final Ticker ticker, final int minutesToExpire) { return new CaffeineCache(name, Caffeine.newBuilder().expireAfterWrite(minutesToExpire, TimeUnit.MINUTES).ticker(ticker).build()); } @Bean Ticker ticker() { return Ticker.systemTicker(); } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((4958, 5130), 'dev.langchain4j.service.AiServices.builder'), ((4958, 5046), 'dev.langchain4j.service.AiServices.builder'), ((9475, 9571), 'com.github.benmanes.caffeine.cache.Caffeine.newBuilder'), ((9475, 9563), 'com.github.benmanes.caffeine.cache.Caffeine.newBuilder'), ((9475, 9548), 'com.github.benmanes.caffeine.cache.Caffeine.newBuilder')]
package com.example.demo.configuration; import dev.langchain4j.model.azure.AzureOpenAiChatModel; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.mistralai.MistralAiChatModel; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration public class ChatModelConfiguration { @Bean ChatLanguageModel openAIChatLanguageModel() { return AzureOpenAiChatModel.builder() .endpoint(System.getenv("AZURE_OPENAI_ENDPOINT")) .apiKey(System.getenv("AZURE_OPENAI_KEY")) .deploymentName("gpt-4") .logRequestsAndResponses(true) .build(); } ChatLanguageModel mistralAIChatLanguageModel() { return MistralAiChatModel.builder() .baseUrl(System.getenv("MISTRAL_AI_BASE_URL")) .apiKey(System.getenv("MISTRAL_AI_KEY")) .logRequests(true) .logResponses(true) .build(); } }
[ "dev.langchain4j.model.mistralai.MistralAiChatModel.builder", "dev.langchain4j.model.azure.AzureOpenAiChatModel.builder" ]
[((453, 721), 'dev.langchain4j.model.azure.AzureOpenAiChatModel.builder'), ((453, 696), 'dev.langchain4j.model.azure.AzureOpenAiChatModel.builder'), ((453, 649), 'dev.langchain4j.model.azure.AzureOpenAiChatModel.builder'), ((453, 608), 'dev.langchain4j.model.azure.AzureOpenAiChatModel.builder'), ((453, 549), 'dev.langchain4j.model.azure.AzureOpenAiChatModel.builder'), ((798, 1042), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((798, 1017), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((798, 981), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((798, 946), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((798, 889), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder')]
package io.github.btarg.javaOpenAI.openai; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.UserMessage; import dev.langchain4j.service.UserName; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import io.github.btarg.javaOpenAI.openai.memory.PersistentChatMemoryStore; import io.github.btarg.javaOpenAI.openai.tools.Calculator; import io.github.btarg.javaOpenAI.openai.tools.CommandTool; import io.github.btarg.javaOpenAI.openai.tools.ItemTool; import io.github.btarg.javaOpenAI.openai.tools.PlayerTool; import org.bukkit.command.CommandSender; import org.bukkit.entity.Player; public class ChatGPTAPI { ChatMemoryStore chatMemoryStore; public ChatGPTAPI(ChatMemoryStore chatMemoryStore) { this.chatMemoryStore = chatMemoryStore; } public String GetResponse(Player sender, String message) { String senderUUID = sender.getUniqueId().toString(); Object[] tools = new Object[] { new Calculator(), new CommandTool(), new PlayerTool(), new ItemTool() }; ChatMemory chatMemory = MessageWindowChatMemory.builder() .maxMessages(10) .chatMemoryStore(new PersistentChatMemoryStore()) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(OpenAiChatModel.withApiKey(System.getenv("OPENAI_API_KEY"))) .tools(tools) .chatMemory(chatMemory) .build(); return assistant.chat(senderUUID, message); } interface Assistant { String chat(@UserName String userUUID, @UserMessage String userMessage); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((1290, 1447), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((1290, 1422), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((1290, 1356), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((1480, 1706), 'dev.langchain4j.service.AiServices.builder'), ((1480, 1681), 'dev.langchain4j.service.AiServices.builder'), ((1480, 1641), 'dev.langchain4j.service.AiServices.builder'), ((1480, 1611), 'dev.langchain4j.service.AiServices.builder')]
package com.tencent.supersonic.headless.core.chat.parser.llm; import com.tencent.supersonic.common.util.JsonUtil; import com.tencent.supersonic.headless.core.config.OptimizationConfig; import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq; import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq.SqlGenerationMode; import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMResp; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.output.Response; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.tuple.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; @Service @Slf4j public class OnePassSCSqlGeneration implements SqlGeneration, InitializingBean { private static final Logger keyPipelineLog = LoggerFactory.getLogger("keyPipeline"); @Autowired private ChatLanguageModel chatLanguageModel; @Autowired private SqlExamplarLoader sqlExamplarLoader; @Autowired private OptimizationConfig optimizationConfig; @Autowired private SqlPromptGenerator sqlPromptGenerator; @Override public LLMResp generation(LLMReq llmReq, Long dataSetId) { //1.retriever sqlExamples and generate exampleListPool keyPipelineLog.info("dataSetId:{},llmReq:{}", dataSetId, llmReq); List<Map<String, String>> sqlExamples = sqlExamplarLoader.retrieverSqlExamples(llmReq.getQueryText(), optimizationConfig.getText2sqlExampleNum()); List<List<Map<String, String>>> exampleListPool = sqlPromptGenerator.getExampleCombos(sqlExamples, 
optimizationConfig.getText2sqlFewShotsNum(), optimizationConfig.getText2sqlSelfConsistencyNum()); //2.generator linking and sql prompt by sqlExamples,and parallel generate response. List<String> linkingSqlPromptPool = sqlPromptGenerator.generatePromptPool(llmReq, exampleListPool, true); List<String> llmResults = new CopyOnWriteArrayList<>(); linkingSqlPromptPool.parallelStream().forEach(linkingSqlPrompt -> { Prompt prompt = PromptTemplate.from(JsonUtil.toString(linkingSqlPrompt)) .apply(new HashMap<>()); keyPipelineLog.info("request prompt:{}", prompt.toSystemMessage()); Response<AiMessage> response = chatLanguageModel.generate(prompt.toSystemMessage()); String result = response.content().text(); llmResults.add(result); keyPipelineLog.info("model response:{}", result); } ); //3.format response. List<String> schemaLinkingResults = llmResults.stream() .map(llmResult -> OutputFormat.getSchemaLinks(llmResult)).collect(Collectors.toList()); List<String> candidateSortedList = OutputFormat.formatList(schemaLinkingResults); Pair<String, Map<String, Double>> linkingMap = OutputFormat.selfConsistencyVote(candidateSortedList); List<String> sqlList = llmResults.stream() .map(llmResult -> OutputFormat.getSql(llmResult)).collect(Collectors.toList()); Pair<String, Map<String, Double>> sqlMapPair = OutputFormat.selfConsistencyVote(sqlList); keyPipelineLog.info("linkingMap:{} sqlMap:{}", linkingMap, sqlMapPair.getRight()); LLMResp result = new LLMResp(); result.setQuery(llmReq.getQueryText()); result.setSqlRespMap(OutputFormat.buildSqlRespMap(sqlExamples, sqlMapPair.getRight())); return result; } @Override public void afterPropertiesSet() { SqlGenerationFactory.addSqlGenerationForFactory(SqlGenerationMode.ONE_PASS_AUTO_COT_SELF_CONSISTENCY, this); } }
[ "dev.langchain4j.model.input.PromptTemplate.from" ]
[((2569, 2677), 'dev.langchain4j.model.input.PromptTemplate.from')]
package org.agoncal.fascicle.langchain4j.firstlook; // tag::adocSnippet[] import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; import java.net.Proxy; // tag::adocSkip[] /** * @author Antonio Goncalves * http://www.antoniogoncalves.org * -- */ // end::adocSkip[] public class AuthorService { public static void main(String[] args) { AuthorService authorService = new AuthorService(); System.out.println(authorService.getIsaacAsimovBiography()); } private static final String OPENAI_API_KEY = System.getenv("OPENAI_API_KEY"); // private String[] scifiAuthors = {"Isaac Asimov", "Nora Jemisin", "Douglas Adams"}; public String getIsaacAsimovBiography() { ChatLanguageModel model = OpenAiChatModel.builder() //.baseUrl("http://localhost:8089") .proxy(new Proxy(Proxy.Type.HTTP, new java.net.InetSocketAddress("localhost", 8089))) .apiKey(OPENAI_API_KEY) .modelName(GPT_3_5_TURBO) .temperature(0.3) .timeout(ofSeconds(60)) .logRequests(true) .logResponses(true) .build(); String prompt = "Write a short biography about Isaac Asimov"; String biography = model.generate(prompt); return biography; } // @GET // @Path("/{index}") // public String getAuthorBiography(@PathParam("index") int index) { // // ChatLanguageModel model = OpenAiChatModel.builder() // .apiKey(OPENAI_API_KEY) // .modelName(GPT_3_5_TURBO) // .temperature(0.3) // .timeout(ofSeconds(60)) // .logRequests(true) // .logResponses(true) // .build(); // // String prompt = "Write a short biography about " + scifiAuthors[index]; // // String biography = model.generate(prompt); // // return biography; // } } // end::adocSnippet[]
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((884, 1225), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1210), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1184), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1159), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1129), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1105), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1073), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((884, 1043), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package io.github.gabrielpadilh4.services; import java.util.HashMap; import java.util.Map; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiModelName; /** * @author [email protected] */ public class LLMService { public static String askChatGPT(String error, String apiKey) { PromptTemplate promptTemplate = PromptTemplate.from("How to fix {{error}}"); Map<String, Object> variables = new HashMap<>(); variables.put("error", error); Prompt prompt = promptTemplate.apply(variables); return withModel(apiKey).generate(prompt.text()); } private static ChatLanguageModel withModel(String apiKey) { return OpenAiChatModel.builder() .apiKey(apiKey) .modelName(OpenAiModelName.GPT_3_5_TURBO) .temperature(0.3) .build(); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((821, 955), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((821, 940), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((821, 916), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((821, 868), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.adrianbadarau.langchainjava.config; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.BertTokenizer; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.retriever.Retriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.core.io.Resource; import org.springframework.core.io.ResourceLoader; import java.io.IOException; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; @Configuration public class LLMConfig { @Bean EmbeddingModel embeddingModel(){ return new AllMiniLmL6V2EmbeddingModel(); } @Bean EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException { // I'm going to use the in memory store for now but in the future we should switch to a vector DB InMemoryEmbeddingStore<TextSegment> store = new InMemoryEmbeddingStore<>(); Resource resource = resourceLoader.getResource("classpath:t&c.text"); Document document = loadDocument(resource.getFile().toPath()); DocumentSplitter splitter = DocumentSplitters.recursive(100, 0, new BertTokenizer()); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(splitter) .embeddingModel(embeddingModel) .embeddingStore(store) .build(); ingestor.ingest(document); return store; } }
[ "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1758, 1946), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1758, 1921), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1758, 1882), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1758, 1834), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')]
package ru.vzotov.ai.application; import com.google.common.collect.Lists; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.Metadata; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import lombok.Builder; import ru.vzotov.purchase.domain.model.Purchase; import java.util.List; import java.util.Objects; import static java.util.Collections.singletonList; public class PurchaseCategoryProcessor { private final EmbeddingStoreIngestor ingestor; private final int partitionSize; @Builder public PurchaseCategoryProcessor( EmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore, int partitionSize ) { if(partitionSize <= 0) throw new IllegalArgumentException("partitionSize must be > 0"); this.partitionSize = partitionSize; Objects.requireNonNull(embeddingModel); Objects.requireNonNull(embeddingStore); this.ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(doc -> singletonList(doc.toTextSegment())) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); } public void process(List<Purchase> purchases) { Lists.partition(purchases, partitionSize) .stream() .map(this::transform) .forEach(ingestor::ingest); } List<Document> transform(List<Purchase> purchases) { return purchases.stream() .map(ItemAction::new) .map(action -> new Document(action.text(), action.metadata())) .toList(); } static class ItemAction { private static final String ENTITY_PURCHASE = "purchase"; private static final String F_ID = "entityId"; private static final String F_ENTITY = "entity"; private static final String F_REFERENCE_ID = "reference_id"; private static final String F_LAST_MODIFIED = "last_modified"; private final Purchase purchase; private final String text; public ItemAction(Purchase purchase) { this.purchase = purchase; this.text = "Purchase '%s' 
has category '%s' with id '%s'." .formatted(purchase.name(), purchase.category().name(), purchase.category().categoryId().value()); } public Purchase purchase() { return purchase; } public String text() { return text; } public Metadata metadata() { return Metadata.from(F_ID, purchase().purchaseId().value()) .add(F_ENTITY, ENTITY_PURCHASE) .add(F_LAST_MODIFIED, String.valueOf(purchase().updatedOn().toEpochMilli())) .add(F_REFERENCE_ID, purchase().category().categoryId().value()); } } }
[ "dev.langchain4j.data.document.Metadata.from", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder" ]
[((1157, 1387), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1157, 1362), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1157, 1314), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1157, 1266), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1456, 1604), 'com.google.common.collect.Lists.partition'), ((1456, 1561), 'com.google.common.collect.Lists.partition'), ((1456, 1523), 'com.google.common.collect.Lists.partition'), ((2768, 3054), 'dev.langchain4j.data.document.Metadata.from'), ((2768, 2969), 'dev.langchain4j.data.document.Metadata.from'), ((2768, 2872), 'dev.langchain4j.data.document.Metadata.from')]
package org.jugph; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.UserMessage; import java.time.LocalDate; import static java.time.Duration.ofSeconds; public class JUGPHMemberExtractorAIServiceExample { public record JUGPHMember(String firstName, String lastName, String email, JUGPHMember.Gender gender, LocalDate registrationDate) { enum Gender { MALE, FEMALE, NON_BINARY, NOT_SAID } @Override public String toString() { return "JUGPHMember {" + " firstName = \"" + firstName + "\"" + ", lastName = \"" + lastName + "\"" + ", email = \"" + email + "\"" + ", gender = \"" + gender.name().toLowerCase() + "\"" + ", registrationDate = " + registrationDate + " }"; } } interface MemberExtractor { @UserMessage("Extract member information from the following text: {{it}}. Infer the gender if not explicitly said.") JUGPHMember extractMemberFrom(String text); } public static void main(String[] args) { var model = OpenAiChatModel.builder() .apiKey(System.getenv("OPENAI_API_KEY")) .timeout(ofSeconds(120)) // .logRequests(true) // .logResponses(true) .build(); MemberExtractor extractor = AiServices.create(MemberExtractor.class, model); var text = "New member alert: Maria Clara, a passionate Java developer, has just joined the JUGPH community. " + "Her email, [email protected], was sent out on the 17th of November, 2023."; JUGPHMember member = extractor.extractMemberFrom(text); System.out.println(member); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1227, 1450), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1227, 1350), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1227, 1309), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.docuverse.backend.services; import com.docuverse.backend.dtos.ChatRequestDTO; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import io.github.cdimascio.dotenv.Dotenv; import org.mapdb.DB; import org.mapdb.DBMaker; import org.springframework.stereotype.Service; import java.util.List; import java.util.Map; import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson; import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; import static org.mapdb.Serializer.STRING; @Service public class ChatServiceImpl implements ChatService { private final EmbeddingStore<TextSegment> embeddingStore; private final EmbeddingModel embeddingModel; public ChatServiceImpl(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) { this.embeddingStore = embeddingStore; this.embeddingModel = embeddingModel; } @Override public String processChat(ChatRequestDTO request) { Dotenv dotenv = Dotenv.load(); // Create a prompt template PromptTemplate promptTemplate = PromptTemplate.from( "Answer the question as truthfully as possible using the information below, and if the answer is not within the information, say 'I don't know.\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Information:\n" + "{{information}}"); 
System.out.println("checkpoint 3"); try { // Send the prompt to the OpenAI chat model ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(dotenv.get("OPENAI_API_KEY")) .modelName(GPT_3_5_TURBO) .temperature(0.7) .timeout(ofSeconds(15)) .maxRetries(3) .logResponses(true) .logRequests(true) .build(); ChatMemory chatMemory = MessageWindowChatMemory.builder() .maxMessages(10) .chatMemoryStore(new PersistentChatMemoryStore()) .build(); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(chatModel) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel, 1, 0.9)) .chatMemory(chatMemory) // you can override default chat memory .promptTemplate(promptTemplate) // you can override default prompt template .build(); String answer = chain.execute(request.question()); System.out.println(chatMemory.messages()); return answer; } catch (Exception e) { // Handle the exception here, you can log it or return an error message e.printStackTrace(); // Print the exception stack trace for debugging return "An error occurred while processing the request."; } } // public String processChat(ChatRequest request) { // // Dotenv dotenv = Dotenv.load(); // // // Create a prompt template // PromptTemplate promptTemplate = PromptTemplate.from( // "Answer the question as truthfully as possible using the information below, and if the answer is not within the information, say 'I don't know.\n" // + "\n" // + "Question:\n" // + "{{question}}\n" // + "\n" // + "Information:\n" // + "{{information}}"); // // System.out.println("checkpoint 3"); // // // // Send the prompt to the OpenAI chat model // ChatLanguageModel chatModel = OpenAiChatModel.builder() // .apiKey(dotenv.get("OPENAI_API_KEY")) // .modelName(GPT_3_5_TURBO) // .temperature(0.7) // .timeout(ofSeconds(15)) // .maxRetries(3) // .logResponses(true) // .logRequests(true) // .build(); // // ChatMemory chatMemory = MessageWindowChatMemory.builder() // .maxMessages(10) // 
.chatMemoryStore(new PersistentChatMemoryStore()) // .build(); // // ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() // .chatLanguageModel(chatModel) // .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel, 1, 0.9)) // .chatMemory(chatMemory) // you can override default chat memory // .promptTemplate(promptTemplate) // you can override default prompt template // .build(); // // String answer = chain.execute(request.getQuestion()); // System.out.println(chatMemory.messages()); // return answer; // } static class PersistentChatMemoryStore implements ChatMemoryStore { private final DB db = DBMaker.fileDB("chat-memory.db").transactionEnable().make(); private final Map<String, String> map = db.hashMap("messages", STRING, STRING).createOrOpen(); @Override public List<ChatMessage> getMessages(Object memoryId) { String json = map.get((String) memoryId); return messagesFromJson(json); } @Override public void updateMessages(Object memoryId, List<ChatMessage> messages) { String json = messagesToJson(messages); map.put((String) memoryId, json); db.commit(); } @Override public void deleteMessages(Object memoryId) { map.remove((String) memoryId); db.commit(); } } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((2386, 2740), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2711), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2672), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2632), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2597), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2553), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2515), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2386, 2469), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2779, 2948), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((2779, 2919), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((2779, 2849), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((3000, 3398), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3000, 3325), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3000, 3233), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3000, 3189), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3000, 3088), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((5833, 5892), 'org.mapdb.DBMaker.fileDB'), ((5833, 5885), 'org.mapdb.DBMaker.fileDB')]
package org.example; import dev.langchain4j.chain.ConversationalChain; import dev.langchain4j.model.openai.OpenAiChatModel; import java.util.Scanner; public class _08_ConversationalChain { public static void main(String[] args) { OpenAiChatModel model = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_DEMO); ConversationalChain chain = ConversationalChain.builder() .chatLanguageModel(model) .build(); Scanner scanner = new Scanner(System.in); while (scanner.hasNext()) { String in = scanner.nextLine(); String answer = chain.execute(in); System.out.println(answer); } } }
[ "dev.langchain4j.chain.ConversationalChain.builder" ]
[((357, 453), 'dev.langchain4j.chain.ConversationalChain.builder'), ((357, 428), 'dev.langchain4j.chain.ConversationalChain.builder')]
package org.agoncal.fascicle.langchain4j.accessing.openai; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.SystemMessage; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiLanguageModel; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import dev.langchain4j.model.output.FinishReason; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.output.TokenUsage; // tag::adocSkip[] /** * @author Antonio Goncalves * http://www.antoniogoncalves.org * -- */ // end::adocSkip[] public class MusicianService { public static void main(String[] args) { MusicianService musicianService = new MusicianService(); musicianService.useOpenAiLanguageModel(); musicianService.useOpenAiLanguageModelPrompt(); musicianService.useOpenAiLanguageModelBuilder(); musicianService.useOpenAiChatModel(); musicianService.useOpenAiChatModelBuilder(); } private static final String OPENAI_API_KEY = System.getenv("OPENAI_API_KEY"); private static final String PROMPT = "When was the first Beatles album released?"; // ############################# // ### OPENAI LANGUAGE MODEL ### // ############################# public void useOpenAiLanguageModel() { System.out.println("### useOpenAiLanguageModel"); OpenAiLanguageModel model = OpenAiLanguageModel.withApiKey(OPENAI_API_KEY); Response<String> completion = model.generate("When was the first Beatles album released?"); String content = completion.content(); FinishReason finishReason = completion.finishReason(); TokenUsage tokenUsage = completion.tokenUsage(); System.out.println(content); System.out.println(finishReason.name()); System.out.println(tokenUsage.inputTokenCount()); System.out.println(tokenUsage.outputTokenCount()); System.out.println(tokenUsage.totalTokenCount()); } public void useOpenAiLanguageModelPrompt() { System.out.println("### 
useOpenAiLanguageModelPrompt"); OpenAiLanguageModel model = OpenAiLanguageModel.withApiKey(OPENAI_API_KEY); Prompt prompt = new Prompt("When was the first Beatles album released?"); Response<String> completion = model.generate(prompt); String content = completion.content(); FinishReason finishReason = completion.finishReason(); TokenUsage tokenUsage = completion.tokenUsage(); System.out.println(content); System.out.println(finishReason.name()); System.out.println(tokenUsage.inputTokenCount()); System.out.println(tokenUsage.outputTokenCount()); System.out.println(tokenUsage.totalTokenCount()); } public void useOpenAiLanguageModelBuilder() { System.out.println("### useOpenAiLanguageModelBuilder"); // tag::adocSnippet[] OpenAiLanguageModel model = OpenAiLanguageModel.builder() .apiKey(OPENAI_API_KEY) .temperature(0.3) .logRequests(true) .logResponses(true) .build(); // end::adocSnippet[] Response<String> completion = model.generate(PROMPT); System.out.println(completion.content()); System.out.println(completion.finishReason()); System.out.println(completion.tokenUsage()); } // ######################### // ### OPENAI CHAT MODEL ### // ######################### public void useOpenAiChatModel() { System.out.println("### useOpenAiChatModel"); // tag::adocSimple[] OpenAiChatModel model = OpenAiChatModel.withApiKey(OPENAI_API_KEY); String completion = model.generate("When was the first Rolling Stones album released?"); System.out.println(completion); // end::adocSimple[] } public void useOpenAiChatModelBuilder() { System.out.println("### useOpenAiChatModelBuilder"); OpenAiChatModel model = OpenAiChatModel.builder() .apiKey(OPENAI_API_KEY) .modelName(GPT_3_5_TURBO) .temperature(0.9) .logRequests(true) .logResponses(true) .build(); String completion = model.generate("When was the first Rolling Stones album released?"); System.out.println(completion); } public void useOpenAiChatModelAiMessage() { System.out.println("### useOpenAiChatModelAiMessage"); OpenAiChatModel model = 
OpenAiChatModel.withApiKey(OPENAI_API_KEY); SystemMessage sysMsg = new SystemMessage("You are a music expert."); UserMessage userMsg = new UserMessage("When was the first Rolling Stones album released?"); Response<AiMessage> completion = model.generate(sysMsg, userMsg); System.out.println(completion); } }
[ "dev.langchain4j.model.openai.OpenAiLanguageModel.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((2924, 3073), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((2924, 3058), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((2924, 3032), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((2924, 3007), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((2924, 2983), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((3881, 4058), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3881, 4043), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3881, 4017), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3881, 3992), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3881, 3968), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3881, 3936), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore; import java.util.List; public class WeaviateEmbeddingStoreExample { public static void main(String[] args) { EmbeddingStore<TextSegment> embeddingStore = WeaviateEmbeddingStore.builder() // Find it under "Show API keys" of your Weaviate cluster. .apiKey(System.getenv("WEAVIATE_API_KEY")) // The scheme, e.g. "https" of cluster URL. Find in under Details of your Weaviate cluster. .scheme("https") // The host, e.g. "test-o1gvgnp4.weaviate.network" of cluster URL. // Find in under Details of your Weaviate cluster. .host("test3-bwsieg9y.weaviate.network") // "Default" class is used if not specified. Must start from an uppercase letter! .objectClass("Test") // If true (default), then WeaviateEmbeddingStore will generate a hashed ID based on provided // text segment, which avoids duplicated entries in DB. If false, then random ID will be generated. .avoidDups(true) // Consistency level: ONE, QUORUM (default) or ALL. 
.consistencyLevel("ALL") .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); TextSegment segment1 = TextSegment.from("I like football."); Embedding embedding1 = embeddingModel.embed(segment1).content(); embeddingStore.add(embedding1, segment1); TextSegment segment2 = TextSegment.from("The weather is good today."); Embedding embedding2 = embeddingModel.embed(segment2).content(); embeddingStore.add(embedding2, segment2); Embedding queryEmbedding = embeddingModel.embed("What is your favourite sport?").content(); List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1); EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0); System.out.println(embeddingMatch.score()); // 0.8144288063049316 System.out.println(embeddingMatch.embedded().text()); // I like football. } }
[ "dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder" ]
[((573, 1615), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 1590), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 1481), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 1222), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 1087), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 880), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((573, 739), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder')]
import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.mistralai.MistralAiChatModel; import dev.langchain4j.model.mistralai.MistralAiEmbeddingModel; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static java.util.stream.Collectors.joining; public class MistralAiBasicRagEmbedExamples { static class Chat_Story_From_My_Document { public static void main(String[] args) { // In this very simple example, we are getting data that we want to use for RAG. // We will use a history about origin of the Llama by National Geographic https://www.nationalgeographic.es/animales/llama. 
Document document = loadDocument(toPath("example-files/story-about-origin-of-the-llama.txt"), new TextDocumentParser()); // In a RAG system, it is crucial to split the document into smaller chunks so that it's more effective // to identify and retrieve the most relevant information in the retrieval process later DocumentSplitter splitter = DocumentSplitters.recursive(200, 0); List<TextSegment> segments = splitter.split(document); // Now, for each text segment, we need to create text embeddings, which are numeric representations of the text in the vector space. // Of course, we will use Mistral AI for this purpose. EmbeddingModel embeddingModel = MistralAiEmbeddingModel.withApiKey(System.getenv("MISTRAL_AI_API_KEY")); List<Embedding> embeddings = embeddingModel.embedAll(segments).content(); // Once we get the text embeddings, we will store them in a vector database for efficient processing and retrieval. // For simplicity, this example uses an in-memory store, but you can choose any external compatible store for production environments. EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); // Whenever users ask a question, we also need to create embeddings for this question using the same embedding models as before. String question = "Who create the llamas?"; Embedding questionEmbedding = embeddingModel.embed(question).content(); // We can perform a search on the vector database and retrieve the most relevant text chunks based on the user question. int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); // Now we can offer the relevant information as the context information within the prompt. // Here is a prompt template where we can include both the retrieved text and user question in the prompt. 
PromptTemplate promptTemplate = PromptTemplate.from( "Context information is below.:\n" + "------------------\n" + "{{information}}\n" + "------------------\n" + "Given the context information and not prior knowledge, answer the query.\n" + "Query: {{question}}\n" + "Answer:"); String information = relevantEmbeddings.stream() .map(match -> match.embedded().text()) .collect(joining("\n\n")); Map<String, Object> promptInputs = new HashMap<>(); promptInputs.put("question", question); promptInputs.put("information", information); Prompt prompt = promptTemplate.apply(promptInputs); // Now we can use the Mistral AI chat model to generate the answer to the user question based on the context information. ChatLanguageModel chatModel = MistralAiChatModel.builder() .apiKey(System.getenv("MISTRAL_AI_API_KEY")) .modelName("mistral-medium") .temperature(0.2) // expect a more focused and deterministic answer .logRequests(true) .logResponses(true) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content(); String answer = aiMessage.text(); System.out.println(answer); // According to Inca legend, the llamas were created by the mythical founders of the Inca Empire.... } } static Path toPath(String fileName) { try { URL fileUrl = MistralAiBasicRagEmbedExamples.class.getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.model.mistralai.MistralAiChatModel.builder" ]
[((4843, 5181), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((4843, 5152), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((4843, 5112), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((4843, 5023), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((4843, 4985), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((4843, 4936), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder')]
package de.htwg.rag.retriever; import dev.langchain4j.model.embedding.AllMiniLmL6V2QuantizedEmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.rag.DefaultRetrievalAugmentor; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.rag.RetrievalAugmentor; import dev.langchain4j.rag.content.injector.ContentInjector; import dev.langchain4j.rag.content.injector.DefaultContentInjector; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.rag.query.transformer.CompressingQueryTransformer; import dev.langchain4j.rag.query.transformer.QueryTransformer; import io.quarkiverse.langchain4j.pgvector.PgVectorEmbeddingStore; import jakarta.inject.Singleton; import java.util.function.Supplier; import static java.util.Arrays.asList; @Singleton public class AdvancedRetrievalAugmentor implements Supplier<RetrievalAugmentor> { private final RetrievalAugmentor augmentor; // uses the PgVectorEmbeddingStore and the AllMiniLmL6V2QuantizedEmbeddingModel. // The Store is a extension of the normal PostgresDB and the model is running locally. public AdvancedRetrievalAugmentor(PgVectorEmbeddingStore store, AllMiniLmL6V2QuantizedEmbeddingModel model) { // chatmodel just for the query transformer, can be any model, // all it does is compress the input query's to one so that the retrieval is more accurate // and logic from the chat-history gets taken into account ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(System.getenv("OPENAI_APIKEY")) .modelName("gpt-3.5-turbo") .logRequests(true) .logResponses(true) .build(); QueryTransformer queryTransformer = CompressingQueryTransformer.builder() .chatLanguageModel(chatModel) .promptTemplate(PromptTemplate.from("Lese und verstehe das Gespräch zwischen dem Benutzer und dem KI. Analysiere dann die neue Anfrage des Benutzers. 
Identifiziere alle relevanten Details, Begriffe und den Kontext sowohl aus dem Gespräch als auch aus der neuen Anfrage. Formuliere diese Anfrage in ein klares, prägnantes und in sich geschlossenes Format um, das für die Informationssuche geeignet ist.\n" + "\n" + "Gespräch:\n" + "{{chatMemory}}\n" + "\n" + "Benutzeranfrage: {{query}}\n" + "\n" + "Es ist sehr wichtig, dass du nur die umformulierte Anfrage und nichts anderes bereitstellst! Füge einer Anfrage nichts voran!")) .build(); // ContentInjector to give metadata with the retrieved documents ContentInjector contentInjector = DefaultContentInjector.builder() .metadataKeysToInclude(asList("link")) .build(); // The normal Retriever to get the Documents from the store. EmbeddingStoreContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingModel(model) .embeddingStore(store) .maxResults(3) .minScore(0.7) .build(); augmentor = DefaultRetrievalAugmentor .builder() .contentRetriever(contentRetriever) .queryTransformer(queryTransformer) .contentInjector(contentInjector) .build(); } @Override public RetrievalAugmentor get() { return augmentor; } }
[ "dev.langchain4j.rag.query.transformer.CompressingQueryTransformer.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder", "dev.langchain4j.rag.content.injector.DefaultContentInjector.builder" ]
[((1597, 1818), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1597, 1793), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1597, 1757), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1597, 1722), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1597, 1678), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1864, 2790), 'dev.langchain4j.rag.query.transformer.CompressingQueryTransformer.builder'), ((1864, 2765), 'dev.langchain4j.rag.query.transformer.CompressingQueryTransformer.builder'), ((1864, 1947), 'dev.langchain4j.rag.query.transformer.CompressingQueryTransformer.builder'), ((2908, 3020), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((2908, 2995), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((3150, 3355), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3150, 3330), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3150, 3299), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3150, 3268), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3150, 3229), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')]
package com.anthonyquere.companionapi.completion.langchain.models; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.mistralai.MistralAiChatModel; import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration public class LangchainMistralModel { @Value("${mistral-ia.api-key}") private String mistralApiKey; @Bean public ChatLanguageModel buildMistralModel() { return MistralAiChatModel.builder() .apiKey(mistralApiKey) .maxTokens(100) .modelName("mistral-tiny") .temperature(1.0d) .logRequests(true) .logResponses(true) .maxRetries(1) .build(); } }
[ "dev.langchain4j.model.mistralai.MistralAiChatModel.builder" ]
[((553, 857), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 832), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 801), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 765), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 730), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 695), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 652), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((553, 620), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder')]
package me.nzuguem.something.story.configurations.langchain; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.content.Content; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.rag.query.Query; import io.quarkiverse.langchain4j.chroma.ChromaEmbeddingStore; import jakarta.enterprise.context.ApplicationScoped; import java.util.List; @ApplicationScoped public class StoryContentRetriever implements ContentRetriever { private final EmbeddingStoreContentRetriever retriever; public StoryContentRetriever(ChromaEmbeddingStore chromaEmbeddingStore, EmbeddingModel embeddingModel) { this.retriever = EmbeddingStoreContentRetriever.builder() .embeddingModel(embeddingModel) .embeddingStore(chromaEmbeddingStore) .maxResults(20) .build(); } @Override public List<Content> retrieve(Query query) { return this.retriever.retrieve(query); } }
[ "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((762, 961), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((762, 936), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((762, 904), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((762, 850), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')]
import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.mistralai.MistralAiChatModel; public class MistralAiChatModelExamples { static class Simple_Prompt { public static void main(String[] args) { ChatLanguageModel model = MistralAiChatModel.builder() .apiKey(System.getenv("MISTRAL_AI_API_KEY")) // Please use your own Mistral AI API key .modelName("mistral-small") .logRequests(true) .logResponses(true) .build(); String joke = model.generate("Tell me a joke about Java"); System.out.println(joke); } } }
[ "dev.langchain4j.model.mistralai.MistralAiChatModel.builder" ]
[((278, 569), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((278, 540), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((278, 500), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((278, 461), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder'), ((278, 371), 'dev.langchain4j.model.mistralai.MistralAiChatModel.builder')]
import dev.langchain4j.code.Judge0JavaScriptExecutionTool; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import static java.time.Duration.ofSeconds; public class _11_ServiceWithDynamicToolsExample { interface Assistant { String chat(String message); } public static void main(String[] args) { Judge0JavaScriptExecutionTool judge0Tool = new Judge0JavaScriptExecutionTool(ApiKeys.RAPID_API_KEY); ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .temperature(0.0) .timeout(ofSeconds(60)) .build(); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatLanguageModel) .chatMemory(MessageWindowChatMemory.withMaxMessages(20)) .tools(judge0Tool) .build(); interact(assistant, "What is the square root of 49506838032859?"); interact(assistant, "Capitalize every third letter: abcabc"); interact(assistant, "What is the number of hours between 17:00 on 21 Feb 1988 and 04:00 on 12 Apr 2014?"); } private static void interact(Assistant assistant, String userMessage) { System.out.println("[User]: " + userMessage); String answer = assistant.chat(userMessage); System.out.println("[Assistant]: " + answer); System.out.println(); System.out.println(); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((638, 810), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((638, 785), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((638, 745), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((638, 711), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((843, 1065), 'dev.langchain4j.service.AiServices.builder'), ((843, 1040), 'dev.langchain4j.service.AiServices.builder'), ((843, 1005), 'dev.langchain4j.service.AiServices.builder'), ((843, 932), 'dev.langchain4j.service.AiServices.builder')]
package dev.langchain4j.chain; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.rag.*; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.injector.DefaultContentInjector; import dev.langchain4j.rag.query.Metadata; import dev.langchain4j.retriever.Retriever; import dev.langchain4j.service.AiServices; import static dev.langchain4j.internal.Utils.getOrDefault; import static dev.langchain4j.internal.ValidationUtils.ensureNotNull; /** * A chain for conversing with a specified {@link ChatLanguageModel} * based on the information retrieved by a specified {@link ContentRetriever}. * Includes a default {@link ChatMemory} (a message window with maximum 10 messages), which can be overridden. * You can fully customize RAG behavior by providing an instance of a {@link RetrievalAugmentor}, * such as {@link DefaultRetrievalAugmentor}, or your own custom implementation. * <br> * It is recommended to use {@link AiServices} instead, as it is more powerful. 
*/ public class ConversationalRetrievalChain implements Chain<String, String> { private final ChatLanguageModel chatLanguageModel; private final ChatMemory chatMemory; private final RetrievalAugmentor retrievalAugmentor; public ConversationalRetrievalChain(ChatLanguageModel chatLanguageModel, ChatMemory chatMemory, ContentRetriever contentRetriever) { this( chatLanguageModel, chatMemory, DefaultRetrievalAugmentor.builder() .contentRetriever(contentRetriever) .build() ); } public ConversationalRetrievalChain(ChatLanguageModel chatLanguageModel, ChatMemory chatMemory, RetrievalAugmentor retrievalAugmentor) { this.chatLanguageModel = ensureNotNull(chatLanguageModel, "chatLanguageModel"); this.chatMemory = getOrDefault(chatMemory, () -> MessageWindowChatMemory.withMaxMessages(10)); this.retrievalAugmentor = ensureNotNull(retrievalAugmentor, "retrievalAugmentor"); } /** * Use another constructor with a new {@link ContentRetriever} instead. */ @Deprecated public ConversationalRetrievalChain(ChatLanguageModel chatLanguageModel, ChatMemory chatMemory, PromptTemplate promptTemplate, Retriever<TextSegment> retriever) { this( chatLanguageModel, chatMemory, DefaultRetrievalAugmentor.builder() .contentRetriever(retriever.toContentRetriever()) .contentInjector(DefaultContentInjector.builder() .promptTemplate(toPromptTemplateWithNewVariableNames(promptTemplate)) .build()) .build() ); } @Override public String execute(String query) { UserMessage userMessage = UserMessage.from(query); Metadata metadata = Metadata.from(userMessage, chatMemory.id(), chatMemory.messages()); userMessage = retrievalAugmentor.augment(userMessage, metadata); chatMemory.add(userMessage); AiMessage aiMessage = chatLanguageModel.generate(chatMemory.messages()).content(); chatMemory.add(aiMessage); return aiMessage.text(); } public static Builder builder() { return new Builder(); } public static class Builder { private ChatLanguageModel chatLanguageModel; private ChatMemory chatMemory; private 
RetrievalAugmentor retrievalAugmentor; @Deprecated private dev.langchain4j.retriever.Retriever<TextSegment> retriever; @Deprecated private PromptTemplate promptTemplate; public Builder chatLanguageModel(ChatLanguageModel chatLanguageModel) { this.chatLanguageModel = chatLanguageModel; return this; } public Builder chatMemory(ChatMemory chatMemory) { this.chatMemory = chatMemory; return this; } public Builder contentRetriever(ContentRetriever contentRetriever) { if (contentRetriever != null) { this.retrievalAugmentor = DefaultRetrievalAugmentor.builder() .contentRetriever(contentRetriever) .build(); } return this; } public Builder retrievalAugmentor(RetrievalAugmentor retrievalAugmentor) { this.retrievalAugmentor = retrievalAugmentor; return this; } /** * Deprecated. Use {@link Builder#contentRetriever(ContentRetriever)} instead. */ @Deprecated public Builder retriever(dev.langchain4j.retriever.Retriever<TextSegment> retriever) { this.retriever = retriever; return this; } /** * Deprecated, Use this instead:<pre> * .retrievalAugmentor(DefaultRetrievalAugmentor.builder() * .contentInjector(DefaultContentInjector.builder() * .promptTemplate(promptTemplate) * .build()) * .build()); * </pre> */ @Deprecated public Builder promptTemplate(PromptTemplate promptTemplate) { this.promptTemplate = promptTemplate; return this; } public ConversationalRetrievalChain build() { if (retriever != null) { retrievalAugmentor = DefaultRetrievalAugmentor.builder() .contentRetriever(retriever.toContentRetriever()) .contentInjector(DefaultContentInjector.builder() .promptTemplate(toPromptTemplateWithNewVariableNames(promptTemplate)) .build()) .build(); } return new ConversationalRetrievalChain(chatLanguageModel, chatMemory, retrievalAugmentor); } } private static PromptTemplate toPromptTemplateWithNewVariableNames(PromptTemplate oldPromptTemplate) { if (oldPromptTemplate != null) { return PromptTemplate.from(oldPromptTemplate.template() .replaceAll("\\{\\{question}}", "{{userMessage}}") 
.replaceAll("\\{\\{information}}", "{{contents}}") ); } return PromptTemplate.from( "Answer the following question to the best of your ability: {{userMessage}}\n" + "\n" + "Base your answer on the following information:\n" + "{{contents}}" ); } }
[ "dev.langchain4j.rag.content.injector.DefaultContentInjector.builder" ]
[((3177, 3352), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((3177, 3311), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((6230, 6405), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((6230, 6364), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder')]
package dev.langchain4j.model.vertexai; import com.google.cloud.vertexai.api.*; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Struct; import com.google.protobuf.Value; import com.google.protobuf.util.JsonFormat; import dev.langchain4j.agent.tool.ToolExecutionRequest; import dev.langchain4j.agent.tool.ToolExecutionRequestUtil; import dev.langchain4j.agent.tool.ToolParameters; import dev.langchain4j.agent.tool.ToolSpecification; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; class FunctionCallHelper { static Type fromType(String type) { //TODO: is it covering all the types correctly? switch (type) { case "string": return Type.STRING; case "integer": return Type.INTEGER; case "boolean": return Type.BOOLEAN; case "number": return Type.NUMBER; case "array": return Type.ARRAY; case "object": return Type.OBJECT; default: return Type.TYPE_UNSPECIFIED; } } static FunctionCall fromToolExecutionRequest(ToolExecutionRequest toolExecutionRequest) { FunctionCall.Builder fnCallBuilder = FunctionCall.newBuilder() .setName(toolExecutionRequest.name()); Struct.Builder structBuilder = Struct.newBuilder(); try { JsonFormat.parser().merge(toolExecutionRequest.arguments(), structBuilder); } catch (InvalidProtocolBufferException e) { throw new RuntimeException(e); } Struct argsStruct = structBuilder.build(); fnCallBuilder.setArgs(argsStruct); return fnCallBuilder.build(); } static List<ToolExecutionRequest> fromFunctionCalls(List<FunctionCall> functionCalls) { List<ToolExecutionRequest> toolExecutionRequests = new ArrayList<>(); for (FunctionCall functionCall : functionCalls) { ToolExecutionRequest.Builder builder = ToolExecutionRequest.builder() .name(functionCall.getName()); Map<String, Object> callArgsMap = new HashMap<>(); Struct callArgs = functionCall.getArgs(); Map<String, Value> callArgsFieldsMap = callArgs.getFieldsMap(); callArgsFieldsMap.forEach((key, 
value) -> callArgsMap.put(key, unwrapProtoValue(value))); String serializedArgsMap = ToolExecutionRequestUtil.GSON.toJson(callArgsMap); builder.arguments(serializedArgsMap); toolExecutionRequests.add(builder.build()); } return toolExecutionRequests; } static Object unwrapProtoValue(Value value) { Object unwrappedValue; switch (value.getKindCase()) { case NUMBER_VALUE: unwrappedValue = value.getNumberValue(); break; case STRING_VALUE: unwrappedValue = value.getStringValue(); break; case BOOL_VALUE: unwrappedValue = value.getBoolValue(); break; case STRUCT_VALUE: HashMap<String, Object> mapForStruct = new HashMap<>(); value.getStructValue().getFieldsMap().forEach((key, val) -> mapForStruct.put(key, unwrapProtoValue(val))); unwrappedValue = mapForStruct; break; case LIST_VALUE: unwrappedValue = value.getListValue().getValuesList().stream().map(FunctionCallHelper::unwrapProtoValue).collect(Collectors.toList()); break; default: // NULL_VALUE, KIND_NOT_SET, and default unwrappedValue = null; break; } return unwrappedValue; } static Tool convertToolSpecifications(List<ToolSpecification> toolSpecifications) { Tool.Builder tool = Tool.newBuilder(); for (ToolSpecification toolSpecification : toolSpecifications) { FunctionDeclaration.Builder fnBuilder = FunctionDeclaration.newBuilder() .setName(toolSpecification.name()) .setDescription(toolSpecification.description()); Schema.Builder schema = Schema.newBuilder().setType(Type.OBJECT); ToolParameters parameters = toolSpecification.parameters(); for (String paramName : parameters.required()) { schema.addRequired(paramName); } parameters.properties().forEach((paramName, paramProps) -> { //TODO: is it covering all types & cases of tool parameters? 
(array & object in particular) Type type = fromType((String) paramProps.getOrDefault("type", Type.TYPE_UNSPECIFIED)); String description = (String) paramProps.getOrDefault("description", ""); schema.putProperties(paramName, Schema.newBuilder() .setDescription(description) .setType(type) .build()); }); fnBuilder.setParameters(schema.build()); tool.addFunctionDeclarations(fnBuilder.build()); } return tool.build(); } }
[ "dev.langchain4j.agent.tool.ToolExecutionRequest.builder", "dev.langchain4j.agent.tool.ToolExecutionRequestUtil.GSON.toJson" ]
[((1523, 1597), 'com.google.protobuf.util.JsonFormat.parser'), ((2125, 2201), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2539, 2588), 'dev.langchain4j.agent.tool.ToolExecutionRequestUtil.GSON.toJson')]
package org.acme; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.DefaultRetrievalAugmentor; import dev.langchain4j.rag.RetrievalAugmentor; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.util.function.Supplier; @Singleton public class RetrievalAugmentorExample implements Supplier<RetrievalAugmentor> { @Inject EmbeddingModel embeddingModel; @Inject EmbeddingStore<TextSegment> embeddingStore; @Override public RetrievalAugmentor get() { return DefaultRetrievalAugmentor.builder() .contentRetriever(EmbeddingStoreContentRetriever.builder() .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build()) .build(); } }
[ "dev.langchain4j.rag.DefaultRetrievalAugmentor.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((726, 1007), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((726, 982), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((796, 981), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((796, 948), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((796, 892), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')]
import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentParser; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.loader.FileSystemDocumentLoader; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.rag.DefaultRetrievalAugmentor; import dev.langchain4j.rag.RetrievalAugmentor; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.rag.query.router.LanguageModelQueryRouter; import dev.langchain4j.rag.query.router.QueryRouter; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Scanner; public class _03_Advanced_RAG_with_Query_Routing { /** * Please refer to previous examples for basic context. * <p> * Advanced RAG in LangChain4j is described here: https://github.com/langchain4j/langchain4j/pull/538 * <p> * This example showcases the implementation of a more advanced RAG application * using a technique known as "query routing". * <p> * Often, private data is spread across multiple sources and formats. 
* This might include internal company documentation on Confluence, your project's code in a Git repository, * a relational database with user data, or a search engine with the products you sell, among others. * In a RAG flow that utilizes data from multiple sources, you will likely have multiple * {@link EmbeddingStore}s or {@link ContentRetriever}s. * While you could route each user query to all available {@link ContentRetriever}s, * this approach might be inefficient and counterproductive. * <p> * "Query routing" is the solution to this challenge. It involves directing a query to the most appropriate * {@link ContentRetriever} (or several). Routing can be implemented in various ways: * - Using rules (e.g., depending on the user's privileges, location, etc.). * - Using keywords (e.g., if a query contains words X1, X2, X3, route it to {@link ContentRetriever} X, etc.). * - Using semantic similarity (see EmbeddingModelTextClassifierExample in this repository). * - Using an LLM to make a routing decision. * <p> * For scenarios 1, 2, and 3, you can implement a custom {@link QueryRouter}. * For scenario 4, this example will demonstrate how to use a {@link LanguageModelQueryRouter}. * <p> * We will continue using {@link AiServices} for this example, * but the same principles apply to {@link ConversationalRetrievalChain}, or you can develop your custom RAG flow. */ public static void main(String[] args) { Polymath polymath = createPolymath(); // First, ask "What is the legacy of John Doe?" // Then, ask "Can I cancel my reservation?" // Now, see the logs to observe how the queries are routed to different retrievers. 
try (Scanner scanner = new Scanner(System.in)) { while (true) { System.out.println("=================================================="); System.out.print("User: "); String userQuery = scanner.nextLine(); System.out.println("=================================================="); if ("exit".equalsIgnoreCase(userQuery)) { break; } String polymathAnswer = polymath.answer(userQuery); System.out.println("=================================================="); System.out.println("Polymath: " + polymathAnswer); } } } private static Polymath createPolymath() { // Check _01_Naive_RAG if you need more details on what is going on here ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey("demo") .build(); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); // Let's create a separate embedding store specifically for biographies. EmbeddingStore<TextSegment> biographyEmbeddingStore = embed(toPath("biography-of-john-doe.txt"), embeddingModel); ContentRetriever biographyContentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(biographyEmbeddingStore) .embeddingModel(embeddingModel) .maxResults(2) .minScore(0.6) .build(); // Additionally, let's create a separate embedding store dedicated to terms of use. EmbeddingStore<TextSegment> termsOfUseEmbeddingStore = embed(toPath("miles-of-smiles-terms-of-use.txt"), embeddingModel); ContentRetriever termsOfUseContentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(termsOfUseEmbeddingStore) .embeddingModel(embeddingModel) .maxResults(2) .minScore(0.6) .build(); // Let's create a query router. 
Map<ContentRetriever, String> retrieverToDescription = new HashMap<>(); retrieverToDescription.put(biographyContentRetriever, "biography of John Doe"); retrieverToDescription.put(termsOfUseContentRetriever, "terms of use of car rental company"); QueryRouter queryRouter = new LanguageModelQueryRouter(chatModel, retrieverToDescription); RetrievalAugmentor retrievalAugmentor = DefaultRetrievalAugmentor.builder() .queryRouter(queryRouter) .build(); return AiServices.builder(Polymath.class) .chatLanguageModel(chatModel) .retrievalAugmentor(retrievalAugmentor) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .build(); } private static EmbeddingStore<TextSegment> embed(Path documentPath, EmbeddingModel embeddingModel) { DocumentParser documentParser = new TextDocumentParser(); Document document = FileSystemDocumentLoader.loadDocument(documentPath, documentParser); DocumentSplitter splitter = DocumentSplitters.recursive(300, 0); List<TextSegment> segments = splitter.split(document); List<Embedding> embeddings = embeddingModel.embedAll(segments).content(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); return embeddingStore; } interface Polymath { String answer(String query); } private static Path toPath(String fileName) { try { URL fileUrl = _03_Advanced_RAG_with_Query_Routing.class.getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.rag.DefaultRetrievalAugmentor.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((4678, 4760), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4678, 4735), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5111, 5343), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5111, 5318), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5111, 5287), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5111, 5256), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5111, 5208), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5638, 5871), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5638, 5846), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5638, 5815), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5638, 5784), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((5638, 5736), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6332, 6434), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((6332, 6409), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((6452, 6686), 'dev.langchain4j.service.AiServices.builder'), ((6452, 6661), 'dev.langchain4j.service.AiServices.builder'), ((6452, 6588), 'dev.langchain4j.service.AiServices.builder'), ((6452, 6532), 'dev.langchain4j.service.AiServices.builder')]
package org.example; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.huggingface.HuggingFaceChatModel; import dev.langchain4j.model.huggingface.HuggingFaceModelName; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiChatModelName; public class _02_Builder { public static void main(String[] args) { ChatLanguageModel openAI = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_DEMO) .modelName(OpenAiChatModelName.GPT_4) .temperature(0.3d) .maxTokens(50) .logRequests(true) .logResponses(true) .maxRetries(3) .build(); ChatLanguageModel huggingFace = HuggingFaceChatModel.builder() .accessToken(ApiKeys.OPENAI_DEMO) .modelId(HuggingFaceModelName.TII_UAE_FALCON_7B_INSTRUCT) .temperature(0.3d) .build(); } }
[ "dev.langchain4j.model.huggingface.HuggingFaceChatModel.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((421, 738), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 713), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 682), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 646), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 611), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 580), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 545), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((421, 491), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((781, 995), 'dev.langchain4j.model.huggingface.HuggingFaceChatModel.builder'), ((781, 970), 'dev.langchain4j.model.huggingface.HuggingFaceChatModel.builder'), ((781, 935), 'dev.langchain4j.model.huggingface.HuggingFaceChatModel.builder'), ((781, 861), 'dev.langchain4j.model.huggingface.HuggingFaceChatModel.builder')]
package io.kadras.music.agent; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.service.AiServices; import dev.langchain4j.store.embedding.EmbeddingStore; import io.kadras.music.tools.VirtualInstrumentTools; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration(proxyBeanMethods = false) public class ComposerAgentConfig { @Bean ComposerAgent documentAgent(ChatLanguageModel chatLanguageModel, EmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore, VirtualInstrumentTools virtualInstrumentTools) { ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingStore(embeddingStore) .embeddingModel(embeddingModel) .maxResults(5) .minScore(0.5) .build(); ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10); return AiServices.builder(ComposerAgent.class) .chatLanguageModel(chatLanguageModel) .contentRetriever(contentRetriever) .chatMemory(chatMemory) .tools(virtualInstrumentTools) .build(); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((1053, 1276), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1053, 1251), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1053, 1220), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1053, 1189), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1053, 1141), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1372, 1629), 'dev.langchain4j.service.AiServices.builder'), ((1372, 1604), 'dev.langchain4j.service.AiServices.builder'), ((1372, 1557), 'dev.langchain4j.service.AiServices.builder'), ((1372, 1517), 'dev.langchain4j.service.AiServices.builder'), ((1372, 1465), 'dev.langchain4j.service.AiServices.builder')]
package org.tutorial.yy.langchain.demo.aiservice.memory; import dev.langchain4j.chain.ConversationalChain; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import org.tutorial.yy.langchain.demo.aiservice.LangChainBase; import java.io.IOException; import static dev.langchain4j.data.message.UserMessage.userMessage; /** * @author yyHuangfu * @create 2024/2/3 * @description */ public class ChatMemoryExamples extends LangChainBase { public static void main(String[] args) throws IOException { ConversationalChain chain = ConversationalChain.builder() .chatLanguageModel(getModel()) .chatMemory(getCustomMemory()) .build(); String answerWithName = chain.execute("can u introduce yourself?"); // 你能介绍一下你自己吗? System.out.println(answerWithName); String answerInCN = chain.execute("in chinese plz"); // 请用中文说 System.out.println(answerInCN); } public static ChatMemory getCustomMemory() { // yiyu robot ChatMemory chatMemory = MessageWindowChatMemory.builder() .maxMessages(10) .build(); chatMemory.add(userMessage("you are the human called Alex HF, u also called yiyu")); chatMemory.add(userMessage("u like programming, reading and any sports")); chatMemory.add(userMessage("u can speak Chinese and English")); return chatMemory; } }
[ "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder", "dev.langchain4j.chain.ConversationalChain.builder" ]
[((594, 742), 'dev.langchain4j.chain.ConversationalChain.builder'), ((594, 717), 'dev.langchain4j.chain.ConversationalChain.builder'), ((594, 670), 'dev.langchain4j.chain.ConversationalChain.builder'), ((1132, 1223), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((1132, 1198), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')]
package org.tutorial.yy.langchain.service; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiStreamingChatModel; import dev.langchain4j.model.qianfan.QianfanChatModel; import dev.langchain4j.model.qianfan.QianfanStreamingChatModel; import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; /** * @author yyHuangfu * @create 2024/1/29 * @description */ public class LangModel { public static ChatLanguageModel getModel(String modelName) { switch (modelName) { case "qian_fan": return getQianFanModel(); case "open_ai": return getOpenAIModel(); default: throw new RuntimeException("unKnown model name, plz check your config"); } } private static ChatLanguageModel getOpenAIModel() { return OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .modelName(GPT_3_5_TURBO) .temperature(0.6) .timeout(ofSeconds(60)) .logRequests(true) .logResponses(true) .build(); } public static QianfanStreamingChatModel getStreamModel() { return QianfanStreamingChatModel.builder() .apiKey("bUyk2k96KQ2gG1ZQ8TB8t3cd") .secretKey(ApiKeys.QIAN_FAN_API_KEY) .modelName("Yi-34B-Chat") .build(); } private static QianfanChatModel getQianFanModel() { return QianfanChatModel.builder() .apiKey("bUyk2k96KQ2gG1ZQ8TB8t3cd") .secretKey(ApiKeys.QIAN_FAN_API_KEY) .modelName("Yi-34B-Chat") .build(); } }
[ "dev.langchain4j.model.qianfan.QianfanChatModel.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder", "dev.langchain4j.model.qianfan.QianfanStreamingChatModel.builder" ]
[((979, 1264), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1239), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1203), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1168), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1128), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1094), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1052), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1351, 1558), 'dev.langchain4j.model.qianfan.QianfanStreamingChatModel.builder'), ((1351, 1533), 'dev.langchain4j.model.qianfan.QianfanStreamingChatModel.builder'), ((1351, 1491), 'dev.langchain4j.model.qianfan.QianfanStreamingChatModel.builder'), ((1351, 1438), 'dev.langchain4j.model.qianfan.QianfanStreamingChatModel.builder'), ((1638, 1836), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1638, 1811), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1638, 1769), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1638, 1716), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; import static java.util.stream.Collectors.joining; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChatWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChatWithDocumentLiveTest.class); @Test public void givenDocument_whenPrompted_thenValidResponse() { Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); DocumentSplitter splitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); List<TextSegment> segments = splitter.split(document); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments) .content(); 
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); String question = "Who is Simpson?"; Embedding questionEmbedding = embeddingModel.embed(question) .content(); int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); PromptTemplate promptTemplate = PromptTemplate.from("Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded() .text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()) .content(); logger.info(aiMessage.text()); assertNotNull(aiMessage.text()); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((3188, 3316), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3295), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3259), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package dev.langchain4j.model.vertexai; import dev.langchain4j.data.image.Image; import dev.langchain4j.model.output.Response; import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Base64; import java.util.List; import java.util.Objects; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; public class VertexAiImageModelIT { private static final String ENDPOINT = System.getenv("GCP_VERTEXAI_ENDPOINT"); private static final String LOCATION = System.getenv("GCP_LOCATION"); private static final String PROJECT = System.getenv("GCP_PROJECT_ID"); private static final String PUBLISHER = "google"; private static Image fromPath(Path path) { try { byte[] allBytes = Files.readAllBytes(path); String base64 = Base64.getEncoder().encodeToString(allBytes); return Image.builder() .url(path.toUri()) .base64Data(base64) .build(); } catch (IOException e) { throw new RuntimeException(e); } } @Test public void should_generate_one_image_with_persistence() { VertexAiImageModel imagenModel = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@005") .maxRetries(2) .withPersisting() .build(); Response<Image> imageResponse = imagenModel.generate("watercolor of a colorful parrot drinking a cup of coffee"); System.out.println(imageResponse.content().url()); // has a URL because the generated image is persisted into a file assertThat(imageResponse.content().url()).isNotNull(); assertThat(new File(imageResponse.content().url())).exists(); // checks that there's Base64 data representing the image assertThat(imageResponse.content().base64Data()).isNotNull(); // checks that 
the Base64 content is valid Base64 encoded assertDoesNotThrow(() -> Base64.getDecoder().decode(imageResponse.content().base64Data())); } @Test public void should_generate_three_images_with_persistence() { VertexAiImageModel imagenModel = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@005") .withPersisting() .build(); Response<List<Image>> imageListResponse = imagenModel.generate("photo of a sunset over Malibu beach", 3); assertThat(imageListResponse.content()).hasSize(3); imageListResponse.content().forEach(img -> { assertThat(img.url()).isNotNull(); assertThat(img.base64Data()).isNotNull(); System.out.println(img.url()); }); } @Test public void should_use_image_style_seed_image_source_and_mask_for_editing() throws URISyntaxException { VertexAiImageModel model = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@002") .seed(19707L) .sampleImageStyle(VertexAiImageModel.ImageStyle.photograph) .guidanceScale(100) .maxRetries(4) .withPersisting() .build(); Response<Image> forestResp = model.generate("lush forest"); System.out.println(forestResp.content().url()); assertThat(forestResp.content().base64Data()).isNotNull(); URI maskFileUri = Objects.requireNonNull(getClass().getClassLoader().getResource("mask.png")).toURI(); Response<Image> compositeResp = model.edit( forestResp.content(), fromPath(Paths.get(maskFileUri)), "red trees" ); System.out.println(compositeResp.content().url()); assertThat(compositeResp.content().base64Data()).isNotNull(); } @Test public void should_use_persistTo_and_image_upscaling() { Path defaultTempDirPath = Paths.get(System.getProperty("java.io.tmpdir")); VertexAiImageModel imagenModel = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@002") .sampleImageSize(1024) .withPersisting() 
.persistTo(defaultTempDirPath) .maxRetries(3) .build(); Response<Image> imageResponse = imagenModel.generate("A black bird looking itself in an antique mirror"); System.out.println(imageResponse.content().url()); assertThat(imageResponse.content().url()).isNotNull(); assertThat(new File(imageResponse.content().url())).exists(); assertThat(imageResponse.content().base64Data()).isNotNull(); VertexAiImageModel imagenModelForUpscaling = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@002") .sampleImageSize(4096) .withPersisting() .persistTo(defaultTempDirPath) .maxRetries(3) .build(); Response<Image> upscaledImageResponse = imagenModelForUpscaling.edit(imageResponse.content(), ""); System.out.println(upscaledImageResponse.content().url()); assertThat(upscaledImageResponse.content().url()).isNotNull(); assertThat(new File(upscaledImageResponse.content().url())).exists(); assertThat(upscaledImageResponse.content().base64Data()).isNotNull(); } @Test public void should_use_negative_prompt_and_different_prompt_language() { VertexAiImageModel imagenModel = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@005") .language("ja") .negativePrompt("pepperoni, pineapple") .maxRetries(2) .withPersisting() .build(); Response<Image> imageResponse = imagenModel.generate("ピザ"); // pizza System.out.println(imageResponse.content().url()); assertThat(imageResponse.content().url()).isNotNull(); assertThat(imageResponse.content().base64Data()).isNotNull(); } @Test public void should_raise_error_on_problematic_prompt_or_content_generation() { VertexAiImageModel imagenModel = VertexAiImageModel.builder() .endpoint(ENDPOINT) .location(LOCATION) .project(PROJECT) .publisher(PUBLISHER) .modelName("imagegeneration@005") .withPersisting() .build(); assertThatExceptionOfType(Throwable.class).isThrownBy(() -> 
imagenModel.generate("a nude woman")); } }
[ "dev.langchain4j.data.image.Image.builder" ]
[((1109, 1153), 'java.util.Base64.getEncoder'), ((1174, 1297), 'dev.langchain4j.data.image.Image.builder'), ((1174, 1268), 'dev.langchain4j.data.image.Image.builder'), ((1174, 1228), 'dev.langchain4j.data.image.Image.builder'), ((2446, 2510), 'java.util.Base64.getDecoder'), ((4141, 4224), 'java.util.Objects.requireNonNull')]
import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.util.stream.Collectors.joining; public class ChatWithDocumentsExamples { // Please also check ServiceWithRetrieverExample static class IfYouNeedSimplicity { public static void main(String[] args) throws Exception { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document 
document = loadDocument(toPath("example-files/story-about-happy-carrot.txt")); ingestor.ingest(document); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) // .chatMemory() // you can override default chat memory // .promptTemplate() // you can override default prompt template .build(); String answer = chain.execute("Who is Charlie?"); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } static class If_You_Need_More_Control { public static void main(String[] args) { // Load the document that includes the information you'd like to "chat" about with the model. Document document = loadDocument(toPath("example-files/story-about-happy-carrot.txt")); // Split document into segments 100 tokens each DocumentSplitter splitter = DocumentSplitters.recursive( 100, 0, new OpenAiTokenizer(GPT_3_5_TURBO) ); List<TextSegment> segments = splitter.split(document); // Embed segments (convert them into vectors that represent the meaning) using embedding model EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments).content(); // Store embeddings into embedding store for further search / retrieval EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); // Specify the question you want to ask the model String question = "Who is Charlie?"; // Embed the question Embedding questionEmbedding = embeddingModel.embed(question).content(); // Find relevant embeddings in embedding store by semantic similarity // You can play with parameters below to find a sweet spot for your specific use case int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, 
minScore); // Create a prompt for the model that includes question and relevant embeddings PromptTemplate promptTemplate = PromptTemplate.from( "Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded().text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); // Send the prompt to the OpenAI chat model ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(ApiKeys.OPENAI_API_KEY) .timeout(Duration.ofSeconds(60)) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content(); // See an answer from the model String answer = aiMessage.text(); System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille... } } private static Path toPath(String fileName) { try { URL fileUrl = ChatWithDocumentsExamples.class.getResource(fileName); return Paths.get(fileUrl.toURI()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1935, 2179), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1935, 2149), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1935, 2096), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1935, 2043), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2376, 2794), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2376, 2600), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2376, 2506), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((5821, 5983), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5953), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5899), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static java.time.Duration.ofSeconds; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChainWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChainWithDocumentLiveTest.class); @Test public void givenChainWithDocument_whenPrompted_thenValidResponse() { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); ingestor.ingest(document); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); ConversationalRetrievalChain chain = 
ConversationalRetrievalChain.builder() .chatLanguageModel(chatModel) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .promptTemplate(PromptTemplate.from("Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}")) .build(); String answer = chain.execute("Who is Simpson?"); logger.info(answer); assertNotNull(answer); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1556, 1764), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1743), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1699), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1655), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1955, 2083), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2062), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2026), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2131, 2577), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2556), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2365), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2296), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2211), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package org.acme.examples.aiservices; import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options; import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson; import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson; import static dev.langchain4j.data.message.ChatMessageType.AI; import static dev.langchain4j.data.message.ChatMessageType.USER; import static io.quarkiverse.langchain4j.openai.test.WiremockUtils.DEFAULT_TOKEN; import static org.acme.examples.aiservices.MessageAssertUtils.assertMultipleRequestMessage; import static org.acme.examples.aiservices.MessageAssertUtils.assertSingleRequestMessage; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.tuple; import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import jakarta.enterprise.context.control.ActivateRequestContext; import jakarta.inject.Inject; import jakarta.inject.Singleton; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.stubbing.Scenario; import com.github.tomakehurst.wiremock.stubbing.ServeEvent; import com.github.tomakehurst.wiremock.verification.LoggedRequest; import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.data.message.ChatMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.ChatMemoryProvider; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.retriever.Retriever; 
import dev.langchain4j.service.MemoryId; import dev.langchain4j.service.UserMessage; import dev.langchain4j.store.memory.chat.ChatMemoryStore; import io.quarkiverse.langchain4j.RegisterAiService; import io.quarkiverse.langchain4j.openai.test.WiremockUtils; import io.quarkus.arc.Arc; import io.quarkus.test.QuarkusUnitTest; public class DeclarativeAiServicesTest { private static final int WIREMOCK_PORT = 8089; @RegisterExtension static final QuarkusUnitTest unitTest = new QuarkusUnitTest() .setArchiveProducer( () -> ShrinkWrap.create(JavaArchive.class).addClasses(WiremockUtils.class, MessageAssertUtils.class)) .overrideRuntimeConfigKey("quarkus.langchain4j.openai.api-key", "whatever") .overrideRuntimeConfigKey("quarkus.langchain4j.openai.base-url", "http://localhost:" + WIREMOCK_PORT + "/v1"); private static final TypeReference<Map<String, Object>> MAP_TYPE_REF = new TypeReference<>() { }; static WireMockServer wireMockServer; static ObjectMapper mapper; @BeforeAll static void beforeAll() { wireMockServer = new WireMockServer(options().port(WIREMOCK_PORT)); wireMockServer.start(); mapper = new ObjectMapper(); } @AfterAll static void afterAll() { wireMockServer.stop(); } @BeforeEach void setup() { wireMockServer.resetAll(); wireMockServer.stubFor(WiremockUtils.defaultChatCompletionsStub()); } interface AssistantBase { String chat(String message); } @RegisterAiService interface Assistant extends AssistantBase { String chat2(String message); } @Inject Assistant assistant; @Test @ActivateRequestContext public void test_simple_instruction_with_single_argument_and_no_annotations_from_super() throws IOException { String result = assistant.chat("Tell me a joke about developers"); assertThat(result).isNotBlank(); assertSingleRequestMessage(getRequestAsMap(), "Tell me a joke about developers"); } @Test @ActivateRequestContext public void test_simple_instruction_with_single_argument_and_no_annotations_from_iface() throws IOException { String result = assistant.chat2("Tell me a 
joke about developers"); assertThat(result).isNotBlank(); assertSingleRequestMessage(getRequestAsMap(), "Tell me a joke about developers"); } @Singleton public static class DummyRetriever implements Retriever<TextSegment> { @Override public List<TextSegment> findRelevant(String text) { return List.of(TextSegment.from("dummy")); } } @RegisterAiService(retriever = DummyRetriever.class) interface AssistantWithRetriever { String chat(String message); } @Inject AssistantWithRetriever assistantWithRetriever; @Test @ActivateRequestContext public void test_simple_instruction_with_retriever() throws IOException { String result = assistantWithRetriever.chat("Tell me a joke about developers"); assertThat(result).isNotBlank(); assertSingleRequestMessage(getRequestAsMap(), "Tell me a joke about developers\n\nAnswer using the following information:\ndummy"); } enum Sentiment { POSITIVE, NEUTRAL, NEGATIVE } @RegisterAiService interface SentimentAnalyzer { @UserMessage("Analyze sentiment of {it}") Sentiment analyzeSentimentOf(String text); } @Inject SentimentAnalyzer sentimentAnalyzer; @Test @ActivateRequestContext void test_extract_enum() throws IOException { wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "POSITIVE")); Sentiment sentiment = sentimentAnalyzer .analyzeSentimentOf("This LaptopPro X15 is wicked fast and that 4K screen is a dream."); assertThat(sentiment).isEqualTo(Sentiment.POSITIVE); assertSingleRequestMessage(getRequestAsMap(), "Analyze sentiment of This LaptopPro X15 is wicked fast and that 4K screen is a dream.\nYou must answer strictly in the following format: one of [POSITIVE, NEUTRAL, NEGATIVE]"); } @Singleton static class Calculator { private final Runnable after; Calculator(CalculatorAfter after) { this.after = after; } @Tool("calculates the square root of the provided number") double squareRoot(double number) { var result = Math.sqrt(number); after.run(); return result; } } private static final String scenario = 
"tools"; private static final String secondState = "second"; @Singleton public static class CalculatorAfter implements Runnable { @Override public void run() { wireMockServer.setScenarioState(scenario, secondState); } } public static class ChatMemoryProviderProducer { @Singleton ChatMemoryProvider chatMemory(ChatMemoryStore store) { return memoryId -> MessageWindowChatMemory.builder() .id(memoryId) .maxMessages(10) .chatMemoryStore(store) .build(); } } @Singleton public static class CustomChatMemoryStore implements ChatMemoryStore { // emulating persistent storage private final Map</* memoryId */ Object, String> persistentStorage = new HashMap<>(); @Override public List<ChatMessage> getMessages(Object memoryId) { return messagesFromJson(persistentStorage.get(memoryId)); } @Override public void updateMessages(Object memoryId, List<ChatMessage> messages) { persistentStorage.put(memoryId, messagesToJson(messages)); } @Override public void deleteMessages(Object memoryId) { persistentStorage.remove(memoryId); } } @RegisterAiService(tools = Calculator.class) interface AssistantWithCalculator extends AssistantBase { } @Inject AssistantWithCalculator assistantWithCalculator; @Test @ActivateRequestContext void should_execute_tool_then_answer() throws IOException { var firstResponse = """ { "id": "chatcmpl-8D88Dag1gAKnOPP9Ed4bos7vSpaNz", "object": "chat.completion", "created": 1698140213, "model": "gpt-3.5-turbo-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": null, "function_call": { "name": "squareRoot", "arguments": "{\\n \\"number\\": 485906798473894056\\n}" } }, "finish_reason": "function_call" } ], "usage": { "prompt_tokens": 65, "completion_tokens": 20, "total_tokens": 85 } } """; var secondResponse = """ { "id": "chatcmpl-8D88FIAUWSpwLaShFr0w8G1SWuVdl", "object": "chat.completion", "created": 1698140215, "model": "gpt-3.5-turbo-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": "The square root of 
485,906,798,473,894,056 in scientific notation is approximately 6.97070153193991E8." }, "finish_reason": "stop" } ], "usage": { "prompt_tokens": 102, "completion_tokens": 33, "total_tokens": 135 } } """; wireMockServer.stubFor( WiremockUtils.chatCompletionMapping(DEFAULT_TOKEN) .inScenario(scenario) .whenScenarioStateIs(Scenario.STARTED) .willReturn(WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody(firstResponse))); wireMockServer.stubFor( WiremockUtils.chatCompletionMapping(DEFAULT_TOKEN) .inScenario(scenario) .whenScenarioStateIs(secondState) .willReturn(WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody(secondResponse))); wireMockServer.setScenarioState(scenario, Scenario.STARTED); String userMessage = "What is the square root of 485906798473894056 in scientific notation?"; String answer = assistantWithCalculator.chat(userMessage); assertThat(answer).isEqualTo( "The square root of 485,906,798,473,894,056 in scientific notation is approximately 6.97070153193991E8."); assertThat(wireMockServer.getAllServeEvents()).hasSize(2); assertSingleRequestMessage(getRequestAsMap(getRequestBody(wireMockServer.getAllServeEvents().get(1))), "What is the square root of 485906798473894056 in scientific notation?"); assertMultipleRequestMessage(getRequestAsMap(getRequestBody(wireMockServer.getAllServeEvents().get(0))), List.of( new MessageAssertUtils.MessageContent("user", "What is the square root of 485906798473894056 in scientific notation?"), new MessageAssertUtils.MessageContent("assistant", null), new MessageAssertUtils.MessageContent("function", "6.97070153193991E8"))); } @RegisterAiService interface ChatWithSeparateMemoryForEachUser { String chat(@MemoryId int memoryId, @UserMessage String userMessage); } @Inject ChatWithSeparateMemoryForEachUser chatWithSeparateMemoryForEachUser; @Test @ActivateRequestContext void should_keep_separate_chat_memory_for_each_user_in_store() throws IOException { ChatMemoryStore store = Arc.container().instance(ChatMemoryStore.class).get(); int 
firstMemoryId = 1; int secondMemoryId = 2; /* **** First request for user 1 **** */ String firstMessageFromFirstUser = "Hello, my name is Klaus"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String firstAiResponseToFirstUser = chatWithSeparateMemoryForEachUser.chat(firstMemoryId, firstMessageFromFirstUser); // assert response assertThat(firstAiResponseToFirstUser).isEqualTo("Nice to meet you Klaus"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstMessageFromFirstUser); // assert chat memory assertThat(store.getMessages(firstMemoryId)).hasSize(2) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromFirstUser), tuple(AI, firstAiResponseToFirstUser)); /* **** First request for user 2 **** */ wireMockServer.resetRequests(); String firstMessageFromSecondUser = "Hello, my name is Francine"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Francine")); String firstAiResponseToSecondUser = chatWithSeparateMemoryForEachUser.chat(secondMemoryId, firstMessageFromSecondUser); // assert response assertThat(firstAiResponseToSecondUser).isEqualTo("Nice to meet you Francine"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstMessageFromSecondUser); // assert chat memory assertThat(store.getMessages(secondMemoryId)).hasSize(2) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromSecondUser), tuple(AI, firstAiResponseToSecondUser)); /* **** Second request for user 1 **** */ wireMockServer.resetRequests(); String secondsMessageFromFirstUser = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Klaus")); String secondAiMessageToFirstUser = chatWithSeparateMemoryForEachUser.chat(firstMemoryId, secondsMessageFromFirstUser); // assert response 
assertThat(secondAiMessageToFirstUser).contains("Klaus"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageAssertUtils.MessageContent("user", firstMessageFromFirstUser), new MessageAssertUtils.MessageContent("assistant", firstAiResponseToFirstUser), new MessageAssertUtils.MessageContent("user", secondsMessageFromFirstUser))); // assert chat memory assertThat(store.getMessages(firstMemoryId)).hasSize(4) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromFirstUser), tuple(AI, firstAiResponseToFirstUser), tuple(USER, secondsMessageFromFirstUser), tuple(AI, secondAiMessageToFirstUser)); /* **** Second request for user 2 **** */ wireMockServer.resetRequests(); String secondsMessageFromSecondUser = "What is my name?"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Your name is Francine")); String secondAiMessageToSecondUser = chatWithSeparateMemoryForEachUser.chat(secondMemoryId, secondsMessageFromSecondUser); // assert response assertThat(secondAiMessageToSecondUser).contains("Francine"); // assert request assertMultipleRequestMessage(getRequestAsMap(), List.of( new MessageAssertUtils.MessageContent("user", firstMessageFromSecondUser), new MessageAssertUtils.MessageContent("assistant", firstAiResponseToSecondUser), new MessageAssertUtils.MessageContent("user", secondsMessageFromSecondUser))); // assert chat memory assertThat(store.getMessages(secondMemoryId)).hasSize(4) .extracting(ChatMessage::type, ChatMessage::text) .containsExactly(tuple(USER, firstMessageFromSecondUser), tuple(AI, firstAiResponseToSecondUser), tuple(USER, secondsMessageFromSecondUser), tuple(AI, secondAiMessageToSecondUser)); } @RegisterAiService(chatMemoryProviderSupplier = RegisterAiService.NoChatMemoryProviderSupplier.class) interface NoMemoryService { String chat(@UserMessage String userMessage); } @Inject NoMemoryService noMemoryService; @Test @ActivateRequestContext void 
no_memory_should_be_used() throws IOException { String firstUserMessage = "Hello, my name is Klaus"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "Nice to meet you Klaus")); String firstAiResponse = noMemoryService.chat(firstUserMessage); // assert response assertThat(firstAiResponse).isEqualTo("Nice to meet you Klaus"); // assert request assertSingleRequestMessage(getRequestAsMap(), firstUserMessage); wireMockServer.resetRequests(); String secondUserMessage = "What is my name"; wireMockServer.stubFor(WiremockUtils.chatCompletionsMessageContent(Optional.empty(), "I don't know")); String secondAiResponse = noMemoryService.chat(secondUserMessage); // assert response assertThat(secondAiResponse).isEqualTo("I don't know"); // assert request only contains the second request, so no memory is used assertSingleRequestMessage(getRequestAsMap(), secondUserMessage); } private Map<String, Object> getRequestAsMap() throws IOException { return getRequestAsMap(getRequestBody()); } private Map<String, Object> getRequestAsMap(byte[] body) throws IOException { return mapper.readValue(body, MAP_TYPE_REF); } private byte[] getRequestBody() { assertThat(wireMockServer.getAllServeEvents()).hasSize(1); ServeEvent serveEvent = wireMockServer.getAllServeEvents().get(0); // this works because we reset requests for Wiremock before each test return getRequestBody(serveEvent); } private byte[] getRequestBody(ServeEvent serveEvent) { LoggedRequest request = serveEvent.getRequest(); assertThat(request.getBody()).isNotEmpty(); return request.getBody(); } }
[ "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((2567, 2661), 'org.jboss.shrinkwrap.api.ShrinkWrap.create'), ((7155, 7332), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((7155, 7303), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((7155, 7259), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((7155, 7222), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((10452, 10713), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10452, 10611), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10452, 10548), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10648, 10712), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody'), ((10764, 11021), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10764, 10918), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10764, 10860), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.chatCompletionMapping'), ((10955, 11020), 'io.quarkiverse.langchain4j.openai.test.WiremockUtils.CHAT_RESPONSE_WITHOUT_BODY.withBody'), ((12603, 12656), 'io.quarkus.arc.Arc.container'), ((12603, 12650), 'io.quarkus.arc.Arc.container')]
package org.acme.example; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.inject.Produces; import jakarta.inject.Inject; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.rag.DefaultRetrievalAugmentor; import dev.langchain4j.rag.RetrievalAugmentor; import dev.langchain4j.rag.content.retriever.ContentRetriever; import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import io.quarkiverse.langchain4j.RegisterAiService; @RegisterAiService public interface AiServiceWithAutoDiscoveredRetrievalAugmentor { String chat(String message); @ApplicationScoped class AugmentorProducer { @Inject InMemoryEmbeddingStore<TextSegment> store; @Inject EmbeddingModel embeddingModel; @Produces public RetrievalAugmentor get() { ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder() .embeddingModel(embeddingModel) .embeddingStore(store) .maxResults(1) .build(); return DefaultRetrievalAugmentor.builder() .contentRetriever(contentRetriever) .build(); } } }
[ "dev.langchain4j.rag.DefaultRetrievalAugmentor.builder", "dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder" ]
[((1030, 1229), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1030, 1200), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1030, 1165), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1030, 1122), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((1250, 1370), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((1250, 1341), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder')]
package org.jxch.capital.config;

import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.embedding.OnnxEmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;

import java.util.List;

@Slf4j
@SpringBootTest
class MilvusConfigTest {

    /**
     * Smoke test: embeds a Chinese text segment with a local ONNX text2vec model,
     * stores it in Milvus, then runs a similarity search with a related query and
     * prints score/text for each match.
     * <p>
     * NOTE(review): requires a Milvus instance on localhost:19530 and the ONNX model
     * file at the hard-coded Windows path — confirm availability before running in CI.
     */
    @Test
    void milvusEmbeddingStore() {
        // text2vec-base-chinese produces 768-dimensional vectors, matching .dimension(768) below.
        EmbeddingModel embeddingModel = new OnnxEmbeddingModel("D:\\huggingface\\shibing624\\text2vec-base-chinese\\onnx\\model.onnx");
        EmbeddingStore<TextSegment> embeddingStore = MilvusEmbeddingStore.builder()
                .host("localhost")
                .port(19530)
                .dimension(768)
                .build();

        TextSegment segment1 = TextSegment.from("突破K线通常具有高成交量,有时候其成交量将是普通K线的10至20倍。成交量越高,以及急速拉升的K线越多,出现重大后续行情的几率就越高。在突破之前,多头和空头均在分批建仓,争夺市场的控制权,双方均试图在各自方向成功突破。一旦出现明确的突破,输的一方会很快斩仓止损,而赢的一方甚至会更为激进地加仓。结果是一根或多根趋势K线,通常伴有高成交量。成交量并非总是特别高,但是当其为近期K线平均水平的10倍或更高时,成功突破的概率就更高。成功突破指的是拥有多根后续K线。此外,在几根K线之内失败的突破也可能伴有非比寻常的高成交量,但是这种情况较不常见。成交量的可靠性不足以指导决策,而构成急速拉升的大型趋势K线已经告诉你突破是否很可能会成功。试图将成交量纳人考虑,更多时候会让你分心,妨碍你发挥最佳水平。");
        Embedding embedding1 = embeddingModel.embed(segment1).content();
        embeddingStore.add(embedding1, segment1);

        Embedding queryEmbedding = embeddingModel.embed("如K线重卺,影线越来越长,反方向的趋势K线和回调K线。尽管趋势可能持续很长时间,但是这一段趋势通常会被回撤,成为交易区间的一部分。举例而言,在个急速与通道的上涨趋势形态中,急速拉升是突破,通道通常成为交易区间的第一腿,因此经常被回撤。").content();
        // Ask for up to 8 nearest neighbours; only one segment was stored, so at most one match.
        List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 8);

        for (EmbeddingMatch<TextSegment> match : relevant) {
            System.out.println(match.score()); // observed sample score: 0.8144287765026093
            System.out.println(match.embedded().text());
        }
    }
}
[ "dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore.builder" ]
[((1007, 1158), 'dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore.builder'), ((1007, 1133), 'dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore.builder'), ((1007, 1101), 'dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore.builder'), ((1007, 1072), 'dev.langchain4j.store.embedding.milvus.MilvusEmbeddingStore.builder')]
package dev.langchain4j.chain;

import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.rag.content.injector.DefaultContentInjector;
import dev.langchain4j.rag.DefaultRetrievalAugmentor;
import dev.langchain4j.rag.content.Content;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.retriever.Retriever;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.List;

import static dev.langchain4j.data.message.AiMessage.aiMessage;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;

/**
 * Unit tests for {@code ConversationalRetrievalChain}: verifies that retrieved
 * segments are injected into the user message (with default and custom prompt
 * templates), and that the deprecated {@link Retriever}-based configuration
 * still behaves the same way (backward compatibility).
 */
@ExtendWith(MockitoExtension.class)
class ConversationalRetrievalChainTest {

    private static final String QUERY = "query";
    private static final String ANSWER = "answer";

    @Mock
    ChatLanguageModel chatLanguageModel;

    @Mock
    ContentRetriever contentRetriever;

    @Mock
    Retriever<TextSegment> retriever;

    // @Spy (not @Mock) so real memory accumulates messages for the final assertions.
    @Spy
    ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);

    @Captor
    ArgumentCaptor<List<ChatMessage>> messagesCaptor;

    @BeforeEach
    void beforeEach() {
        // The model always answers ANSWER; tests only inspect the messages it receives.
        when(chatLanguageModel.generate(anyList())).thenReturn(Response.from(aiMessage(ANSWER)));
    }

    @Test
    void should_inject_retrieved_segments() {
        // given
        when(contentRetriever.retrieve(any())).thenReturn(asList(
                Content.from("Segment 1"),
                Content.from("Segment 2")
        ));

        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(chatLanguageModel)
                .chatMemory(chatMemory)
                .contentRetriever(contentRetriever)
                .build();

        // when
        String answer = chain.execute(QUERY);

        // then
        assertThat(answer).isEqualTo(ANSWER);

        verify(chatLanguageModel).generate(messagesCaptor.capture());
        // Exact format produced by the default content injector.
        UserMessage expectedUserMessage = UserMessage.from(
                "query\n" +
                        "\n" +
                        "Answer using the following information:\n" +
                        "Segment 1\n" +
                        "\n" +
                        "Segment 2");
        assertThat(messagesCaptor.getValue()).containsExactly(expectedUserMessage);

        assertThat(chatMemory.messages()).containsExactly(
                expectedUserMessage,
                AiMessage.from(ANSWER)
        );
    }

    @Test
    void should_inject_retrieved_segments_using_custom_prompt_template() {
        // given
        when(contentRetriever.retrieve(any())).thenReturn(asList(
                Content.from("Segment 1"),
                Content.from("Segment 2")
        ));

        PromptTemplate promptTemplate = PromptTemplate.from(
                "Answer '{{userMessage}}' using '{{contents}}'");

        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(chatLanguageModel)
                .chatMemory(chatMemory)
                .retrievalAugmentor(DefaultRetrievalAugmentor.builder()
                        .contentRetriever(contentRetriever)
                        .contentInjector(DefaultContentInjector.builder()
                                .promptTemplate(promptTemplate)
                                .build())
                        .build())
                .build();

        // when
        String answer = chain.execute(QUERY);

        // then
        assertThat(answer).isEqualTo(ANSWER);

        verify(chatLanguageModel).generate(messagesCaptor.capture());
        UserMessage expectedUserMessage = UserMessage.from(
                "Answer 'query' using 'Segment 1\n\nSegment 2'");
        assertThat(messagesCaptor.getValue()).containsExactly(expectedUserMessage);

        assertThat(chatMemory.messages()).containsExactly(
                expectedUserMessage,
                AiMessage.from(ANSWER)
        );
    }

    @Test
    void test_backward_compatibility_should_inject_retrieved_segments() {
        // given
        when(retriever.findRelevant(QUERY)).thenReturn(asList(
                TextSegment.from("Segment 1"),
                TextSegment.from("Segment 2")
        ));
        // Delegate to the real adapter so the legacy Retriever is wrapped as a ContentRetriever.
        when(retriever.toContentRetriever()).thenCallRealMethod();

        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(chatLanguageModel)
                .chatMemory(chatMemory)
                .retriever(retriever)
                .build();

        // when
        String answer = chain.execute(QUERY);

        // then
        assertThat(answer).isEqualTo(ANSWER);

        verify(chatLanguageModel).generate(messagesCaptor.capture());
        // The legacy path uses a different (legacy) prompt wording than the default injector.
        UserMessage expectedUserMessage = UserMessage.from(
                "Answer the following question to the best of your ability: query\n" +
                        "\n" +
                        "Base your answer on the following information:\n" +
                        "Segment 1\n" +
                        "\n" +
                        "Segment 2");
        assertThat(messagesCaptor.getValue()).containsExactly(expectedUserMessage);

        assertThat(chatMemory.messages()).containsExactly(
                expectedUserMessage,
                AiMessage.from(ANSWER)
        );
    }

    @Test
    void test_backward_compatibility_should_inject_retrieved_segments_using_custom_prompt_template() {
        // given
        when(retriever.findRelevant(QUERY)).thenReturn(asList(
                TextSegment.from("Segment 1"),
                TextSegment.from("Segment 2")
        ));
        when(retriever.toContentRetriever()).thenCallRealMethod();

        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(chatLanguageModel)
                .chatMemory(chatMemory)
                .promptTemplate(PromptTemplate.from("Answer '{{question}}' using '{{information}}'"))
                .retriever(retriever)
                .build();

        // when
        String answer = chain.execute(QUERY);

        // then
        assertThat(answer).isEqualTo(ANSWER);

        verify(chatLanguageModel).generate(messagesCaptor.capture());
        UserMessage expectedUserMessage = UserMessage.from(
                "Answer 'query' using 'Segment 1\n\nSegment 2'");
        assertThat(messagesCaptor.getValue()).containsExactly(expectedUserMessage);

        assertThat(chatMemory.messages()).containsExactly(
                expectedUserMessage,
                AiMessage.from(ANSWER)
        );
    }
}
[ "dev.langchain4j.rag.DefaultRetrievalAugmentor.builder", "dev.langchain4j.rag.content.injector.DefaultContentInjector.builder" ]
[((3725, 4033), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((3725, 4000), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((3725, 3820), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((3862, 3999), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder'), ((3862, 3958), 'dev.langchain4j.rag.content.injector.DefaultContentInjector.builder')]
package dev.langchain4j.rag.content.injector;

import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.rag.content.Content;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import java.util.List;
import java.util.function.Function;
import java.util.stream.Stream;

import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;

/**
 * Unit tests for {@link DefaultContentInjector}: verifies the exact text produced
 * when injecting zero, one, or multiple contents into a user message, optionally
 * including selected metadata entries or using a custom prompt template.
 */
class DefaultContentInjectorTest {

    @Test
    void should_not_inject_when_no_content() {
        // given
        UserMessage userMessage = UserMessage.from("Tell me about bananas.");
        List<Content> contents = emptyList();

        ContentInjector injector = new DefaultContentInjector();

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then: with nothing to inject, the original message is returned unchanged
        assertThat(injected).isEqualTo(userMessage);
    }

    @Test
    void should_inject_single_content() {
        // given
        UserMessage userMessage = UserMessage.from("Tell me about bananas.");
        List<Content> contents = singletonList(Content.from("Bananas are awesome!"));

        ContentInjector injector = new DefaultContentInjector();

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then: default template appends the contents after the original message
        assertThat(injected.text()).isEqualTo(
                "Tell me about bananas.\n" +
                        "\n" +
                        "Answer using the following information:\n" +
                        "Bananas are awesome!"
        );
    }

    @Test
    void should_inject_single_content_with_metadata() {
        // given
        UserMessage userMessage = UserMessage.from("Tell me about bananas.");
        TextSegment segment = TextSegment.from(
                "Bananas are awesome!",
                Metadata.from("source", "trust me bro")
        );
        List<Content> contents = singletonList(Content.from(segment));

        // Only the listed metadata keys are rendered alongside the content.
        List<String> metadataKeysToInclude = singletonList("source");

        ContentInjector injector = new DefaultContentInjector(metadataKeysToInclude);

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then: content is prefixed with "content:" and each included key follows on its own line
        assertThat(injected.text()).isEqualTo(
                "Tell me about bananas.\n" +
                        "\n" +
                        "Answer using the following information:\n" +
                        "content: Bananas are awesome!\n" +
                        "source: trust me bro"
        );
    }

    @Test
    void should_inject_multiple_contents() {
        // given
        UserMessage userMessage = UserMessage.from("Tell me about bananas.");
        List<Content> contents = asList(
                Content.from("Bananas are awesome!"),
                Content.from("Bananas are healthy!")
        );

        ContentInjector injector = new DefaultContentInjector();

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then: contents are separated by a blank line
        assertThat(injected.text()).isEqualTo(
                "Tell me about bananas.\n" +
                        "\n" +
                        "Answer using the following information:\n" +
                        "Bananas are awesome!\n" +
                        "\n" +
                        "Bananas are healthy!"
        );
    }

    // Runs once per injector-construction style (constructor vs builder) supplied by the
    // same-named @MethodSource provider below.
    @ParameterizedTest
    @MethodSource
    void should_inject_multiple_contents_with_multiple_metadata_entries(
            Function<List<String>, ContentInjector> contentInjectorProvider
    ) {
        // given
        UserMessage userMessage = UserMessage.from("Tell me about bananas.");

        TextSegment segment1 = TextSegment.from(
                "Bananas are awesome!",
                Metadata.from("source", "trust me bro")
                        .add("date", "today")
        );
        TextSegment segment2 = TextSegment.from(
                "Bananas are healthy!",
                Metadata.from("source", "my doctor")
                        .add("reliability", "100%")
        );
        List<Content> contents = asList(Content.from(segment1), Content.from(segment2));

        // "date" is present only on segment1, "reliability" only on segment2 —
        // absent keys are simply omitted per segment.
        List<String> metadataKeysToInclude = asList("source", "reliability", "date");

        ContentInjector injector = contentInjectorProvider.apply(metadataKeysToInclude);

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then
        assertThat(injected.text()).isEqualTo(
                "Tell me about bananas.\n" +
                        "\n" +
                        "Answer using the following information:\n" +
                        "content: Bananas are awesome!\n" +
                        "source: trust me bro\n" +
                        "date: today\n" +
                        "\n" +
                        "content: Bananas are healthy!\n" +
                        "source: my doctor\n" +
                        "reliability: 100%"
        );
    }

    // Provider: exercises both the List-taking constructor and the builder.
    static Stream<Arguments> should_inject_multiple_contents_with_multiple_metadata_entries() {
        return Stream.<Arguments>builder()
                .add(Arguments.of(
                        (Function<List<String>, ContentInjector>) DefaultContentInjector::new
                ))
                .add(Arguments.of(
                        (Function<List<String>, ContentInjector>) (metadataKeysToInclude) ->
                                DefaultContentInjector.builder()
                                        .metadataKeysToInclude(metadataKeysToInclude)
                                        .build()
                ))
                .build();
    }

    // Runs once per injector-construction style (constructor vs builder) supplied by the
    // same-named @MethodSource provider below.
    @ParameterizedTest
    @MethodSource
    void should_inject_multiple_contents_with_custom_prompt_template(
            Function<PromptTemplate, ContentInjector> contentInjectorProvider) {
        // given
        PromptTemplate promptTemplate = PromptTemplate.from("{{userMessage}}\n{{contents}}");

        UserMessage userMessage = UserMessage.from("Tell me about bananas.");
        List<Content> contents = asList(
                Content.from("Bananas are awesome!"),
                Content.from("Bananas are healthy!")
        );

        ContentInjector injector = contentInjectorProvider.apply(promptTemplate);

        // when
        UserMessage injected = injector.inject(contents, userMessage);

        // then: custom template replaces the default "Answer using..." preamble
        assertThat(injected.text()).isEqualTo(
                "Tell me about bananas.\n" +
                        "Bananas are awesome!\n" +
                        "\n" +
                        "Bananas are healthy!"
        );
    }

    // Provider: exercises both the PromptTemplate-taking constructor and the builder.
    static Stream<Arguments> should_inject_multiple_contents_with_custom_prompt_template() {
        return Stream.<Arguments>builder()
                .add(Arguments.of(
                        (Function<PromptTemplate, ContentInjector>) DefaultContentInjector::new
                ))
                .add(Arguments.of(
                        (Function<PromptTemplate, ContentInjector>) (promptTemplate) ->
                                DefaultContentInjector.builder()
                                        .promptTemplate(promptTemplate)
                                        .build()
                ))
                .build();
    }
}
[ "dev.langchain4j.data.document.Metadata.from" ]
[((4092, 4177), 'dev.langchain4j.data.document.Metadata.from'), ((4294, 4382), 'dev.langchain4j.data.document.Metadata.from'), ((5423, 5970), 'java.util.stream.Stream.<Arguments>builder'), ((5423, 5945), 'java.util.stream.Stream.<Arguments>builder'), ((5423, 5598), 'java.util.stream.Stream.<Arguments>builder'), ((7055, 7585), 'java.util.stream.Stream.<Arguments>builder'), ((7055, 7560), 'java.util.stream.Stream.<Arguments>builder'), ((7055, 7232), 'java.util.stream.Stream.<Arguments>builder')]
package io.thomasvitale.langchain4j.spring.openai;

import java.util.List;

import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.output.FinishReason;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import org.json.JSONException;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
import org.skyscreamer.jsonassert.JSONAssert;
import org.skyscreamer.jsonassert.JSONCompareMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.client.RestClient;

import io.thomasvitale.langchain4j.spring.openai.api.chat.ChatCompletionFinishReason;
import io.thomasvitale.langchain4j.spring.openai.api.chat.ChatCompletionRequest;
import io.thomasvitale.langchain4j.spring.openai.client.OpenAiClient;
import io.thomasvitale.langchain4j.spring.openai.client.OpenAiClientConfig;

import static dev.langchain4j.agent.tool.JsonSchemaProperty.INTEGER;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.model.output.FinishReason.STOP;
import static dev.langchain4j.model.output.FinishReason.TOOL_EXECUTION;
import static org.assertj.core.api.Assertions.assertThat;

/**
 * Integration tests for {@link OpenAiChatModel}.
 * <p>
 * Adapted from OpenAiChatModelIT in the LangChain4j project.
 */
@EnabledIfEnvironmentVariable(named = "LANGCHAIN4J_OPENAI_CLIENT_API_KEY", matches = ".*")
class OpenAiChatModelIT {

    private static final Logger logger = LoggerFactory.getLogger(OpenAiChatModelIT.class);

    public static final String MODEL_NAME = "gpt-3.5-turbo";

    private static OpenAiClient openAiClient;

    // Tool shared by all tool-calling tests: a two-integer sum.
    private ToolSpecification calculator = ToolSpecification.builder()
            .name("calculator")
            .description("returns a sum of two numbers")
            .addParameter("first", INTEGER)
            .addParameter("second", INTEGER)
            .build();

    @BeforeAll
    static void beforeAll() {
        var apiKey = System.getenv("LANGCHAIN4J_OPENAI_CLIENT_API_KEY");
        openAiClient = new OpenAiClient(OpenAiClientConfig.builder().apiKey(apiKey).build(), RestClient.builder());
    }

    @Test
    void generateText() {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().build())
                .build();

        UserMessage userMessage = UserMessage.from("What is the capital of Italy?");

        Response<AiMessage> response = chatModel.generate(userMessage);
        logger.info("Response: \n" + response);

        AiMessage aiMessage = response.content();
        assertThat(aiMessage.text()).contains("Rome");
        assertThat(aiMessage.toolExecutionRequests()).isNull();

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.totalTokenCount())
                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());

        assertThat(response.finishReason().name()).isEqualToIgnoringCase(ChatCompletionFinishReason.STOP.name());
    }

    @Test
    void generateTextTooLong() {
        // maxTokens(1) forces truncation so the finish reason becomes LENGTH.
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder()
                        .maxTokens(1).build())
                .build();

        UserMessage userMessage = UserMessage.from("What is the capital of Denmark?");

        Response<AiMessage> response = chatModel.generate(userMessage);
        logger.info("Response: \n" + response);

        AiMessage aiMessage = response.content();
        assertThat(aiMessage.text()).isNotBlank();

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.outputTokenCount()).isEqualTo(1);
        assertThat(tokenUsage.totalTokenCount())
                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());

        assertThat(response.finishReason().name()).isEqualToIgnoringCase(ChatCompletionFinishReason.LENGTH.name());
    }

    @Test
    void generateTextWithFewShots() {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().model(MODEL_NAME).build())
                .build();

        // Few-shot examples teach the ">>> " answer format.
        List<ChatMessage> messages = List.of(
                UserMessage.from("1 + 1 ="),
                AiMessage.from(">>> 2"),

                UserMessage.from("2 + 2 ="),
                AiMessage.from(">>> 4"),

                UserMessage.from("4 + 4 ="));

        Response<AiMessage> response = chatModel.generate(messages);
        logger.info("Response: \n" + response);

        assertThat(response.content().text()).startsWith(">>> 8");
    }

    @Test
    void generateTextWithSystemMessage() {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().model(MODEL_NAME).build())
                .build();

        SystemMessage systemMessage = SystemMessage.from("Start every answer with Ahoy");
        UserMessage userMessage = UserMessage.from("Hello, captain!");

        Response<AiMessage> response = chatModel.generate(systemMessage, userMessage);
        logger.info("Response: \n" + response);

        assertThat(response.content().text()).containsIgnoringCase("Ahoy");
    }

    @Test
    void generateTextWithNumPredict() {
        var maximumOutputTokens = 1;
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().model(MODEL_NAME).maxTokens(maximumOutputTokens).build())
                .build();

        UserMessage userMessage = UserMessage.from("What is the capital of Italy?");

        Response<AiMessage> response = chatModel.generate(userMessage);
        logger.info("Response: \n" + response);

        AiMessage aiMessage = response.content();
        // A single output token cannot contain the full answer.
        assertThat(aiMessage.text()).doesNotContain("Italy");
        assertThat(response.tokenUsage().outputTokenCount()).isEqualTo(maximumOutputTokens);
    }

    @Test
    void generateTextAsJson() throws JSONException {
        // json_object response format constrains the model to emit valid JSON.
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().model(MODEL_NAME)
                        .responseFormat(new ChatCompletionRequest.ResponseFormat("json_object")).build())
                .build();

        String response = chatModel
                .generate("Return a JSON object with two fields: location is Jungle and name is Jumanji.");
        logger.info("Response: \n" + response);

        JSONAssert.assertEquals("""
                {
                  "name": "Jumanji",
                  "location": "Jungle"
                }
                """, response, JSONCompareMode.STRICT);
    }

    @Test
    void executeToolExplicitlyAndThenGenerateAnswer() throws JSONException {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().build())
                .build();

        // Execute tool
        UserMessage userMessage = userMessage("2+2=?");
        List<ToolSpecification> toolSpecifications = List.of(calculator);

        Response<AiMessage> response = chatModel.generate(List.of(userMessage), toolSpecifications);

        AiMessage aiMessage = response.content();
        assertThat(aiMessage.text()).isNull();
        assertThat(aiMessage.toolExecutionRequests()).hasSize(1);

        ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
        assertThat(toolExecutionRequest.id()).isNotBlank();
        assertThat(toolExecutionRequest.name()).isEqualTo("calculator");
        JSONAssert.assertEquals("""
                {
                  "first": 2,
                  "second": 2
                }
                """, toolExecutionRequest.arguments(), JSONCompareMode.STRICT);

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.totalTokenCount())
                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());

        assertThat(response.finishReason()).isEqualTo(FinishReason.TOOL_EXECUTION);

        // Then answer
        ToolExecutionResultMessage toolExecutionResultMessage = ToolExecutionResultMessage.from(toolExecutionRequest, "4");
        List<ChatMessage> messages = List.of(userMessage, aiMessage, toolExecutionResultMessage);

        Response<AiMessage> secondResponse = chatModel.generate(messages);

        AiMessage secondAiMessage = secondResponse.content();
        assertThat(secondAiMessage.text()).contains("4");
        assertThat(secondAiMessage.toolExecutionRequests()).isNull();

        TokenUsage secondTokenUsage = secondResponse.tokenUsage();
        assertThat(secondTokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.totalTokenCount())
                .isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());

        assertThat(secondResponse.finishReason()).isEqualTo(FinishReason.STOP);
    }

    @Test
    void executeToolImplicitlyAndThenGenerateAnswer() throws JSONException {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder().build())
                .build();

        // Execute tool
        UserMessage userMessage = userMessage("2+2=?");

        Response<AiMessage> response = chatModel.generate(List.of(userMessage), calculator);

        AiMessage aiMessage = response.content();
        assertThat(aiMessage.text()).isNull();
        assertThat(aiMessage.toolExecutionRequests()).hasSize(1);

        ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
        assertThat(toolExecutionRequest.id()).isNotBlank();
        assertThat(toolExecutionRequest.name()).isEqualTo("calculator");
        JSONAssert.assertEquals("""
                {
                  "first": 2,
                  "second": 2
                }
                """, toolExecutionRequest.arguments(), JSONCompareMode.STRICT);

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.totalTokenCount())
                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());

        assertThat(response.finishReason()).isEqualTo(STOP); // Not sure if a bug in OpenAI or stop is expected here

        // Then answer
        ToolExecutionResultMessage toolExecutionResultMessage = ToolExecutionResultMessage.from(toolExecutionRequest, "4");
        List<ChatMessage> messages = List.of(userMessage, aiMessage, toolExecutionResultMessage);

        Response<AiMessage> secondResponse = chatModel.generate(messages);

        AiMessage secondAiMessage = secondResponse.content();
        assertThat(secondAiMessage.text()).contains("4");
        assertThat(secondAiMessage.toolExecutionRequests()).isNull();

        TokenUsage secondTokenUsage = secondResponse.tokenUsage();
        assertThat(secondTokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.totalTokenCount())
                .isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());

        assertThat(secondResponse.finishReason()).isEqualTo(STOP);
    }

    @Test
    void executeMultipleToolsInParallelThenAnswer() throws JSONException {
        // gpt-3.5-turbo-1106 supports parallel tool calls; temperature 0 for determinism.
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .client(openAiClient)
                .options(OpenAiChatOptions.builder()
                        .model("gpt-3.5-turbo-1106")
                        .temperature(0.0)
                        .build())
                .build();

        // Execute multiple tools
        UserMessage userMessage = userMessage("2+2=? 3+3=?");
        List<ToolSpecification> toolSpecifications = List.of(calculator);

        Response<AiMessage> response = chatModel.generate(List.of(userMessage), toolSpecifications);

        AiMessage aiMessage = response.content();
        assertThat(aiMessage.text()).isNull();
        assertThat(aiMessage.toolExecutionRequests()).hasSize(2);

        ToolExecutionRequest toolExecutionRequest1 = aiMessage.toolExecutionRequests().get(0);
        assertThat(toolExecutionRequest1.name()).isEqualTo("calculator");
        JSONAssert.assertEquals("""
                {
                  "first": 2,
                  "second": 2
                }
                """, toolExecutionRequest1.arguments(), JSONCompareMode.STRICT);

        ToolExecutionRequest toolExecutionRequest2 = aiMessage.toolExecutionRequests().get(1);
        assertThat(toolExecutionRequest2.name()).isEqualTo("calculator");
        JSONAssert.assertEquals("""
                {
                  "first": 3,
                  "second": 3
                }
                """, toolExecutionRequest2.arguments(), JSONCompareMode.STRICT);

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(tokenUsage.totalTokenCount())
                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());

        assertThat(response.finishReason()).isEqualTo(TOOL_EXECUTION);

        // Then answer
        ToolExecutionResultMessage toolExecutionResultMessage1 = ToolExecutionResultMessage.from(toolExecutionRequest1, "4");
        ToolExecutionResultMessage toolExecutionResultMessage2 = ToolExecutionResultMessage.from(toolExecutionRequest2, "6");
        List<ChatMessage> messages = List.of(userMessage, aiMessage, toolExecutionResultMessage1, toolExecutionResultMessage2);

        Response<AiMessage> secondResponse = chatModel.generate(messages);

        AiMessage secondAiMessage = secondResponse.content();
        assertThat(secondAiMessage.text()).contains("4", "6");
        assertThat(secondAiMessage.toolExecutionRequests()).isNull();

        TokenUsage secondTokenUsage = secondResponse.tokenUsage();
        assertThat(secondTokenUsage.inputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
        assertThat(secondTokenUsage.totalTokenCount())
                .isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());

        assertThat(secondResponse.finishReason()).isEqualTo(STOP);
    }
}
[ "dev.langchain4j.agent.tool.ToolSpecification.builder" ]
[((2103, 2329), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2103, 2308), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2103, 2263), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2103, 2219), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2103, 2162), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2490, 2541), 'io.thomasvitale.langchain4j.spring.openai.client.OpenAiClientConfig.builder'), ((2490, 2533), 'io.thomasvitale.langchain4j.spring.openai.client.OpenAiClientConfig.builder'), ((3562, 3600), 'io.thomasvitale.langchain4j.spring.openai.api.chat.ChatCompletionFinishReason.STOP.name'), ((4590, 4630), 'io.thomasvitale.langchain4j.spring.openai.api.chat.ChatCompletionFinishReason.LENGTH.name')]
package org.feuyeux.ai.langchain.hellolangchain; import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument; import static java.time.Duration.ofSeconds; import static org.feuyeux.ai.langchain.hellolangchain.OpenApi.getKey; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.parser.TextDocumentParser; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import java.nio.file.Paths; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @Slf4j public class ChainsTest { public static final String SIMPSON_S_ADVENTURES_TXT = "src/test/resources/simpson's_adventures.txt"; @AfterEach public void tearDown() throws InterruptedException { TimeUnit.SECONDS.sleep(25); } @Test public void givenChainWithDocument_whenPrompted_thenValidResponse() { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document 
document = loadDocument(Paths.get(SIMPSON_S_ADVENTURES_TXT), new TextDocumentParser()); ingestor.ingest(document); ChatLanguageModel chatModel = OpenAiChatModel.builder().apiKey(getKey()).timeout(ofSeconds(60)).build(); ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder() .chatLanguageModel(chatModel) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .promptTemplate( PromptTemplate.from( "Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}")) .build(); String answer = chain.execute("Who is Simpson?"); log.info(answer); Assertions.assertNotNull(answer); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1559, 1585), 'java.util.concurrent.TimeUnit.SECONDS.sleep'), ((1872, 2080), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1872, 2059), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1872, 2015), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1872, 1971), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2258, 2331), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2258, 2323), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2258, 2300), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2383, 2867), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2383, 2846), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2383, 2617), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2383, 2548), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2383, 2463), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package dev.langchain4j.model.vertexai; import com.google.cloud.vertexai.VertexAI; import com.google.cloud.vertexai.api.GenerationConfig; import com.google.cloud.vertexai.generativeai.GenerativeModel; import dev.langchain4j.agent.tool.JsonSchemaProperty; import dev.langchain4j.agent.tool.Tool; import dev.langchain4j.agent.tool.ToolExecutionRequest; import dev.langchain4j.agent.tool.ToolSpecification; import dev.langchain4j.data.message.*; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.output.FinishReason; import dev.langchain4j.model.output.Response; import dev.langchain4j.model.output.TokenUsage; import dev.langchain4j.service.AiServices; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.Base64; import java.util.List; import static dev.langchain4j.internal.Utils.readBytes; import static dev.langchain4j.model.output.FinishReason.LENGTH; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.*; class VertexAiGeminiChatModelIT { static final String CAT_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/e/e9/Felis_silvestris_silvestris_small_gradual_decrease_of_quality.png"; static final String DICE_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png"; ChatLanguageModel model = VertexAiGeminiChatModel.builder() .project(System.getenv("GCP_PROJECT_ID")) .location(System.getenv("GCP_LOCATION")) .modelName("gemini-pro") .build(); ChatLanguageModel visionModel = VertexAiGeminiChatModel.builder() .project(System.getenv("GCP_PROJECT_ID")) .location(System.getenv("GCP_LOCATION")) .modelName("gemini-pro-vision") .build(); @Test void should_generate_response() { // given UserMessage userMessage = UserMessage.from("What is the capital of Germany?"); // when Response<AiMessage> response = 
model.generate(userMessage); System.out.println(response); // then assertThat(response.content().text()).contains("Berlin"); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(7); assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(FinishReason.STOP); } @Test void should_deny_system_message() { // given SystemMessage systemMessage = SystemMessage.from("Be polite"); UserMessage userMessage = UserMessage.from("Tell me a joke"); // when-then assertThatThrownBy(() -> model.generate(systemMessage, userMessage)) .isExactlyInstanceOf(IllegalArgumentException.class) .hasMessage("SystemMessage is currently not supported by Gemini"); } @Test void should_respect_maxOutputTokens() { // given ChatLanguageModel model = VertexAiGeminiChatModel.builder() .project(System.getenv("GCP_PROJECT_ID")) .location(System.getenv("GCP_LOCATION")) .modelName("gemini-pro") .maxOutputTokens(1) .build(); UserMessage userMessage = UserMessage.from("Tell me a joke"); // when Response<AiMessage> response = model.generate(userMessage); System.out.println(response); // then assertThat(response.content().text()).isNotBlank(); TokenUsage tokenUsage = response.tokenUsage(); assertThat(tokenUsage.inputTokenCount()).isEqualTo(4); assertThat(tokenUsage.outputTokenCount()).isEqualTo(1); assertThat(tokenUsage.totalTokenCount()) .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount()); assertThat(response.finishReason()).isEqualTo(LENGTH); } @Test void should_allow_custom_generativeModel_and_generationConfig() { // given VertexAI vertexAi = new VertexAI(System.getenv("GCP_PROJECT_ID"), System.getenv("GCP_LOCATION")); GenerativeModel generativeModel = new GenerativeModel("gemini-pro", vertexAi); GenerationConfig generationConfig = GenerationConfig.getDefaultInstance(); 
ChatLanguageModel model = new VertexAiGeminiChatModel(generativeModel, generationConfig); UserMessage userMessage = UserMessage.from("What is the capital of Germany?"); // when Response<AiMessage> response = model.generate(userMessage); System.out.println(response); // then assertThat(response.content().text()).contains("Berlin"); } @Test void should_accept_text_and_image_from_public_url() { // given UserMessage userMessage = UserMessage.from( ImageContent.from(CAT_IMAGE_URL), TextContent.from("What do you see? Reply in one word.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); } @Test void should_accept_text_and_image_from_google_storage_url() { // given UserMessage userMessage = UserMessage.from( ImageContent.from("gs://langchain4j-test/cat.png"), TextContent.from("What do you see? Reply in one word.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); } @Test void should_accept_text_and_base64_image() { // given String base64Data = Base64.getEncoder().encodeToString(readBytes(CAT_IMAGE_URL)); UserMessage userMessage = UserMessage.from( ImageContent.from(base64Data, "image/png"), TextContent.from("What do you see? Reply in one word.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()).containsIgnoringCase("cat"); } @Test void should_accept_text_and_multiple_images_from_public_urls() { // given UserMessage userMessage = UserMessage.from( ImageContent.from(CAT_IMAGE_URL), ImageContent.from(DICE_IMAGE_URL), TextContent.from("What do you see? 
Reply with one word per image.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dice"); } @Test void should_accept_text_and_multiple_images_from_google_storage_urls() { // given UserMessage userMessage = UserMessage.from( ImageContent.from("gs://langchain4j-test/cat.png"), ImageContent.from("gs://langchain4j-test/dice.png"), TextContent.from("What do you see? Reply with one word per image.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dice"); } @Test void should_accept_text_and_multiple_base64_images() { // given String catBase64Data = Base64.getEncoder().encodeToString(readBytes(CAT_IMAGE_URL)); String diceBase64Data = Base64.getEncoder().encodeToString(readBytes(DICE_IMAGE_URL)); UserMessage userMessage = UserMessage.from( ImageContent.from(catBase64Data, "image/png"), ImageContent.from(diceBase64Data, "image/png"), TextContent.from("What do you see? Reply with one word per image.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dice"); } @Test void should_accept_text_and_multiple_images_from_different_sources() { // given UserMessage userMessage = UserMessage.from( ImageContent.from(CAT_IMAGE_URL), ImageContent.from("gs://langchain4j-test/dog.jpg"), ImageContent.from(Base64.getEncoder().encodeToString(readBytes(DICE_IMAGE_URL)), "image/png"), TextContent.from("What do you see? 
Reply with one word per image.") ); // when Response<AiMessage> response = visionModel.generate(userMessage); // then assertThat(response.content().text()) .containsIgnoringCase("cat") .containsIgnoringCase("dog") .containsIgnoringCase("dice"); } @Test void should_accept_tools_for_function_calling() { // given ChatLanguageModel model = VertexAiGeminiChatModel.builder() .project(System.getenv("GCP_PROJECT_ID")) .location(System.getenv("GCP_LOCATION")) .modelName("gemini-pro") .build(); ToolSpecification weatherToolSpec = ToolSpecification.builder() .name("getWeatherForecast") .description("Get the weather forecast for a location") .addParameter("location", JsonSchemaProperty.STRING, JsonSchemaProperty.description("the location to get the weather forecast for")) .build(); List<ChatMessage> allMessages = new ArrayList<>(); UserMessage weatherQuestion = UserMessage.from("What is the weather in Paris?"); System.out.println("Question: " + weatherQuestion.text()); allMessages.add(weatherQuestion); // when Response<AiMessage> messageResponse = model.generate(allMessages, weatherToolSpec); // then assertThat(messageResponse.content().hasToolExecutionRequests()).isTrue(); ToolExecutionRequest toolExecutionRequest = messageResponse.content().toolExecutionRequests().get(0); assertThat(toolExecutionRequest.arguments()).contains("Paris"); assertThat(toolExecutionRequest.name()).isEqualTo("getWeatherForecast"); allMessages.add(messageResponse.content()); // when (feeding the function return value back) ToolExecutionResultMessage toolExecResMsg = ToolExecutionResultMessage.from(toolExecutionRequest, "{\"location\":\"Paris\",\"forecast\":\"sunny\", \"temperature\": 20}"); allMessages.add(toolExecResMsg); Response<AiMessage> weatherResponse = model.generate(allMessages); // then System.out.println("Answer: " + weatherResponse.content().text()); assertThat(weatherResponse.content().text()).containsIgnoringCase("sunny"); } static class Calculator { @Tool("Adds two given numbers") 
double add(double a, double b) { System.out.printf("Called add(%s, %s)%n", a, b); return a + b; } @Tool("Multiplies two given numbers") String multiply(double a, double b) { System.out.printf("Called multiply(%s, %s)%n", a, b); return String.valueOf(a * b); } } interface Assistant { String chat(String userMessage); } @Test void should_use_tools_with_AiService() { // given Calculator calculator = spy(new Calculator()); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(model) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .tools(calculator) .build(); // when String answer = assistant.chat("How much is 74589613588 + 4786521789?"); // then // assertThat(answer).contains("79376135377"); TODO verify(calculator).add(74589613588.0, 4786521789.0); verifyNoMoreInteractions(calculator); } @Test void should_use_tools_with_AiService_2() { // given Calculator calculator = spy(new Calculator()); Assistant assistant = AiServices.builder(Assistant.class) .chatLanguageModel(model) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .tools(calculator) .build(); // when String answer = assistant.chat("How much is 257 * 467?"); // then // assertThat(answer).contains("120019"); TODO verify(calculator).multiply(257, 467); verifyNoMoreInteractions(calculator); } }
[ "dev.langchain4j.agent.tool.ToolSpecification.builder", "dev.langchain4j.service.AiServices.builder" ]
[((6027, 6087), 'java.util.Base64.getEncoder'), ((7832, 7892), 'java.util.Base64.getEncoder'), ((7926, 7987), 'java.util.Base64.getEncoder'), ((8823, 8884), 'java.util.Base64.getEncoder'), ((9670, 10011), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((9670, 9986), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((9670, 9813), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((9670, 9741), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((12012, 12222), 'dev.langchain4j.service.AiServices.builder'), ((12012, 12197), 'dev.langchain4j.service.AiServices.builder'), ((12012, 12162), 'dev.langchain4j.service.AiServices.builder'), ((12012, 12089), 'dev.langchain4j.service.AiServices.builder'), ((12675, 12885), 'dev.langchain4j.service.AiServices.builder'), ((12675, 12860), 'dev.langchain4j.service.AiServices.builder'), ((12675, 12825), 'dev.langchain4j.service.AiServices.builder'), ((12675, 12752), 'dev.langchain4j.service.AiServices.builder')]
package io.thomasvitale.langchain4j.spring.core.tool.jackson; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import dev.langchain4j.agent.tool.ToolExecutionRequest; import org.json.JSONException; import org.junit.jupiter.api.Test; import org.skyscreamer.jsonassert.JSONAssert; import org.skyscreamer.jsonassert.JSONCompareMode; import io.thomasvitale.langchain4j.spring.core.json.jackson.LangChain4jJacksonProvider; import static org.assertj.core.api.Assertions.assertThat; /** * Unit tests for {@link ToolExecutionRequestMixin}. */ class ToolExecutionRequestMixinTests { private final ObjectMapper objectMapper = LangChain4jJacksonProvider.getObjectMapper(); @Test void serializeAndDeserializeToolExecutionRequest() throws JsonProcessingException, JSONException { var toolExecutionRequest = ToolExecutionRequest.builder() .id("QUERY_DB") .name("queryDatabase") .arguments("{}") .build(); var json = objectMapper.writeValueAsString(toolExecutionRequest); JSONAssert.assertEquals(""" { "id": "QUERY_DB", "name": "queryDatabase", "arguments": "{}" } """, json, JSONCompareMode.STRICT); var deserializedToolExecutionRequest = objectMapper.readValue(json, ToolExecutionRequest.class); assertThat(deserializedToolExecutionRequest).isEqualTo(toolExecutionRequest); } }
[ "dev.langchain4j.agent.tool.ToolExecutionRequest.builder" ]
[((886, 1029), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((886, 1008), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((886, 979), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((886, 944), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; import static java.util.stream.Collectors.joining; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChatWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChatWithDocumentLiveTest.class); @Test public void givenDocument_whenPrompted_thenValidResponse() { Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); DocumentSplitter splitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); List<TextSegment> segments = splitter.split(document); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments) .content(); 
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); String question = "Who is Simpson?"; Embedding questionEmbedding = embeddingModel.embed(question) .content(); int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); PromptTemplate promptTemplate = PromptTemplate.from("Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded() .text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()) .content(); logger.info(aiMessage.text()); assertNotNull(aiMessage.text()); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((3188, 3316), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3295), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3259), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO; import static java.time.Duration.ofSeconds; import static java.util.stream.Collectors.joining; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.DocumentSplitter; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.AiMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.Prompt; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.model.openai.OpenAiTokenizer; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChatWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChatWithDocumentLiveTest.class); @Test public void givenDocument_whenPrompted_thenValidResponse() { Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); DocumentSplitter splitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO)); List<TextSegment> segments = splitter.split(document); EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); List<Embedding> embeddings = embeddingModel.embedAll(segments) .content(); 
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); embeddingStore.addAll(embeddings, segments); String question = "Who is Simpson?"; Embedding questionEmbedding = embeddingModel.embed(question) .content(); int maxResults = 3; double minScore = 0.7; List<EmbeddingMatch<TextSegment>> relevantEmbeddings = embeddingStore.findRelevant(questionEmbedding, maxResults, minScore); PromptTemplate promptTemplate = PromptTemplate.from("Answer the following question to the best of your ability:\n" + "\n" + "Question:\n" + "{{question}}\n" + "\n" + "Base your answer on the following information:\n" + "{{information}}"); String information = relevantEmbeddings.stream() .map(match -> match.embedded() .text()) .collect(joining("\n\n")); Map<String, Object> variables = new HashMap<>(); variables.put("question", question); variables.put("information", information); Prompt prompt = promptTemplate.apply(variables); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()) .content(); logger.info(aiMessage.text()); assertNotNull(aiMessage.text()); } }
[ "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((3188, 3316), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3295), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3188, 3259), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static java.time.Duration.ofSeconds; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChainWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChainWithDocumentLiveTest.class); @Test public void givenChainWithDocument_whenPrompted_thenValidResponse() { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); ingestor.ingest(document); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); ConversationalRetrievalChain chain = 
ConversationalRetrievalChain.builder() .chatLanguageModel(chatModel) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .promptTemplate(PromptTemplate.from("Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}")) .build(); String answer = chain.execute("Who is Simpson?"); logger.info(answer); assertNotNull(answer); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1556, 1764), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1743), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1699), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1655), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1955, 2083), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2062), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2026), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2131, 2577), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2556), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2365), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2296), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2211), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package com.baeldung.langchain; import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument; import static java.time.Duration.ofSeconds; import static org.junit.Assert.assertNotNull; import java.nio.file.Paths; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.input.PromptTemplate; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; public class ChainWithDocumentLiveTest { private static final Logger logger = LoggerFactory.getLogger(ChainWithDocumentLiveTest.class); @Test public void givenChainWithDocument_whenPrompted_thenValidResponse() { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); Document document = loadDocument(Paths.get("src/test/resources/example-files/simpson's_adventures.txt")); ingestor.ingest(document); ChatLanguageModel chatModel = OpenAiChatModel.builder() .apiKey(Constants.OPENAI_API_KEY) .timeout(ofSeconds(60)) .build(); ConversationalRetrievalChain chain = 
ConversationalRetrievalChain.builder() .chatLanguageModel(chatModel) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .promptTemplate(PromptTemplate.from("Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}")) .build(); String answer = chain.execute("Who is Simpson?"); logger.info(answer); assertNotNull(answer); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((1556, 1764), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1743), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1699), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1556, 1655), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1955, 2083), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2062), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1955, 2026), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2131, 2577), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2556), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2365), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2296), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2131, 2211), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')]
package io.quarkiverse.langchain4j.sample; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import jakarta.annotation.PreDestroy; import jakarta.enterprise.context.RequestScoped; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.ChatMemoryProvider; import dev.langchain4j.memory.chat.MessageWindowChatMemory; @RequestScoped public class ChatMemoryBean implements ChatMemoryProvider { private final Map<Object, ChatMemory> memories = new ConcurrentHashMap<>(); @Override public ChatMemory get(Object memoryId) { return memories.computeIfAbsent(memoryId, id -> MessageWindowChatMemory.builder() .maxMessages(20) .id(memoryId) .build()); } @PreDestroy public void close() { memories.clear(); } }
[ "dev.langchain4j.memory.chat.MessageWindowChatMemory.builder" ]
[((632, 753), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((632, 728), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((632, 698), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')]
import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.StreamingChatLanguageModel; import dev.langchain4j.model.openai.OpenAiStreamingChatModel; import dev.langchain4j.service.AiServices; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; public class AnswerService { private static final Logger LOGGER = LogManager.getLogger(AnswerService.class); private Assistant assistant; public void init(SearchAction action) { action.appendAnswer("Initiating..."); initChat(action); } private void initChat(SearchAction action) { StreamingChatLanguageModel model = OpenAiStreamingChatModel.withApiKey(ApiKeys.OPENAI_API_KEY); assistant = AiServices.builder(Assistant.class) .streamingChatLanguageModel(model) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .build(); action.appendAnswer("Done"); action.setFinished(); } void ask(SearchAction action) { LOGGER.info("Asking question '" + action.getQuestion() + "'"); var responseHandler = new CustomStreamingResponseHandler(action); assistant.chat(action.getQuestion()) .onNext(responseHandler::onNext) .onComplete(responseHandler::onComplete) .onError(responseHandler::onError) .start(); } }
[ "dev.langchain4j.service.AiServices.builder" ]
[((758, 942), 'dev.langchain4j.service.AiServices.builder'), ((758, 917), 'dev.langchain4j.service.AiServices.builder'), ((758, 844), 'dev.langchain4j.service.AiServices.builder')]
package com.example.demo; import dev.langchain4j.chain.ConversationalChain; import dev.langchain4j.data.embedding.Embedding; import dev.langchain4j.data.message.SystemMessage; import dev.langchain4j.data.message.UserMessage; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.chat.ChatLanguageModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.image.ImageModel; import dev.langchain4j.store.embedding.EmbeddingMatch; import dev.langchain4j.store.embedding.EmbeddingStore; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.GetMapping; import java.util.Arrays; import java.util.List; import static java.util.Arrays.asList; @Controller public class DemoController { private final ImageModel imageModel; private final ChatLanguageModel chatLanguageModel; private final EmbeddingModel embeddingModel; private final EmbeddingStore<TextSegment> embeddingStore; public DemoController(ImageModel imageModel, ChatLanguageModel chatLanguageModel, EmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore) { this.imageModel = imageModel; this.chatLanguageModel = chatLanguageModel; this.embeddingModel = embeddingModel; this.embeddingStore = embeddingStore; } @GetMapping("/") public String demo() { return "demo"; } @GetMapping("/1") String createImage(Model model) { String question = "A coffee mug in Paris, France"; String answer = imageModel.generate(question).content().url().toString(); model.addAttribute("demo", "Demo 1: image generation"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/2") String getAnswer(Model model) { String question = "Who painted the Mona Lisa?"; String answer = chatLanguageModel.generate(UserMessage.from(question)).content().text(); 
model.addAttribute("demo", "Demo 2: simple question"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/3") String getAnswerWithSystemMessage(Model model) { SystemMessage systemMessage = SystemMessage.from("I am the king of France. " + "Talk to me with extreme deference."); String question = "Who painted the Mona Lisa?"; String answer = chatLanguageModel.generate(systemMessage, UserMessage.from(question)).content().text(); model.addAttribute("demo", "Demo 3: advanced question"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/4") String getAnswerWithLocation(Model model) { String question = "Where can you see this painting?"; String answer = chatLanguageModel.generate(UserMessage.from(question)).content().text(); model.addAttribute("demo", "Demo 4: A question without memory"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/5") String getAnswerUsingConversationChain(Model model) { String context = "Who painted the Mona Lisa?"; String question = "Where can you see this painting?"; ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(20); ConversationalChain chain = ConversationalChain.builder() .chatLanguageModel(chatLanguageModel) .chatMemory(chatMemory) .build(); chain.execute(context); String answer = chain.execute(question); model.addAttribute("demo", "Demo 5: A question with memory"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/6") String loadVectorDatabase(Model model) { String content1 = "banana"; String content2 = "computer"; String content3 = "apple"; String content4 = "pizza"; String content5 = "strawberry"; String content6 = "chess"; List<String> contents = asList(content1, content2, content3, content4, content5, content6); for (String content : contents) { TextSegment textSegment = 
TextSegment.from(content); Embedding embedding = embeddingModel.embed(content).content(); embeddingStore.add(embedding, textSegment); } model.addAttribute("demo", "Demo 6: Data ingestion"); model.addAttribute("question", "Ingesting data into the vector database"); model.addAttribute("answer", "OK"); return "demo"; } @GetMapping("/7") String queryVectorDatabase(Model model) { String question = "fruit"; Embedding relevantEmbedding = embeddingModel.embed(question).content(); List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(relevantEmbedding, 3); String answer = relevant.get(0).embedded().text() + "\n"; answer += relevant.get(1).embedded().text() + "\n"; answer += relevant.get(2).embedded().text() + "\n"; model.addAttribute("demo", "Demo 7: Querying the vector database"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } @GetMapping("/8") String queryVectorDatabaseWithDetails(Model model) { String question = "fruit"; Embedding relevantEmbedding = embeddingModel.embed(question).content(); List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(relevantEmbedding, 3); String answer = relevant.get(0).embedded().text() + " | " + Arrays.toString(relevant.get(0).embedding().vector()) + "\n"; answer += relevant.get(1).embedded().text() + " | " + Arrays.toString(relevant.get(1).embedding().vector()) + "\n"; answer += relevant.get(2).embedded().text() + " | " + Arrays.toString(relevant.get(2).embedding().vector()) + "\n"; model.addAttribute("demo", "Demo 8: Getting the vectors from the vector database"); model.addAttribute("question", question); model.addAttribute("answer", answer); return "demo"; } }
[ "dev.langchain4j.chain.ConversationalChain.builder" ]
[((3673, 3821), 'dev.langchain4j.chain.ConversationalChain.builder'), ((3673, 3796), 'dev.langchain4j.chain.ConversationalChain.builder'), ((3673, 3756), 'dev.langchain4j.chain.ConversationalChain.builder')]
package co.elastic.examples; import dev.langchain4j.model.ollama.OllamaEmbeddingModel; import dev.langchain4j.model.ollama.OllamaStreamingLanguageModel; import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore; import org.jline.reader.LineReader; import org.jline.reader.LineReaderBuilder; import org.jline.terminal.Terminal; import org.jline.terminal.TerminalBuilder; import java.io.FileInputStream; import java.io.IOException; import java.time.Duration; import java.util.Map; import java.util.Properties; public class Components { private static final Map<String, String> config; static { try (var input = new FileInputStream("config.properties")) { var props = new Properties(); props.load(input); @SuppressWarnings("unchecked") Map<String, String> map = (Map<String, String>) (Map<?, ?>) props; config = map; } catch (Exception e) { throw new RuntimeException(e); } } public static LineReader createLineReader() throws IOException { Terminal terminal = TerminalBuilder.builder().dumb(true).build(); return LineReaderBuilder.builder().terminal(terminal).build(); } public static OllamaEmbeddingModel createEmbeddingModel() { return new OllamaEmbeddingModel( config.get("ollama-url"), Duration.ofSeconds(30), config.get("ollama-embed-model"), 1 ); } public static OllamaStreamingLanguageModel createLanguageModel() { return new OllamaStreamingLanguageModel( config.get("ollama-url"), Duration.ofSeconds(30), config.get("ollama-language-model"), null ); } public static ElasticsearchEmbeddingStore createStore(String index) { var config = readConfig(); return ElasticsearchEmbeddingStore.builder() .serverUrl(config.get("es-url")) .userName(config.get("es-login")) .password(config.get("es-password")) .dimension(Integer.parseInt(config.get("ollama-embed-dimensions"))) .indexName(index) .build(); } public static Map<String, String> readConfig() { try (var input = new FileInputStream("config.properties")) { var props = new Properties(); props.load(input); 
@SuppressWarnings("unchecked") Map<String, String> map = (Map<String, String>) (Map<?, ?>) props; return map; } catch (Exception e) { throw new RuntimeException(e); } } }
[ "dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder" ]
[((1106, 1150), 'org.jline.terminal.TerminalBuilder.builder'), ((1106, 1142), 'org.jline.terminal.TerminalBuilder.builder'), ((1167, 1221), 'org.jline.reader.LineReaderBuilder.builder'), ((1167, 1213), 'org.jline.reader.LineReaderBuilder.builder'), ((1889, 2197), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1889, 2176), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1889, 2146), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1889, 2066), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1889, 2017), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1889, 1971), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder')]
package com.sivalabs.tcguidesbot.config; import dev.langchain4j.chain.ConversationalRetrievalChain; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.UrlDocumentLoader; import dev.langchain4j.data.document.splitter.DocumentSplitters; import dev.langchain4j.data.segment.TextSegment; import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel; import dev.langchain4j.model.embedding.EmbeddingModel; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.retriever.EmbeddingStoreRetriever; import dev.langchain4j.store.embedding.EmbeddingStore; import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import java.time.Duration; import java.util.List; @Configuration public class AppConfig { private static final Logger log = LoggerFactory.getLogger(AppConfig.class); private static final List<String> guideUrls = List.of( "https://testcontainers.com/guides/replace-h2-with-real-database-for-testing/", "https://testcontainers.com/guides/configuration-of-services-running-in-container/", "https://testcontainers.com/guides/testing-spring-boot-kafka-listener-using-testcontainers/", "https://testcontainers.com/guides/getting-started-with-testcontainers-for-java/", "https://testcontainers.com/guides/testing-rest-api-integrations-in-micronaut-apps-using-wiremock/", "https://testcontainers.com/guides/working-with-jooq-flyway-using-testcontainers/" ); private final ApplicationProperties properties; public AppConfig(ApplicationProperties properties) { this.properties = properties; } @Bean public ConversationalRetrievalChain chain() { EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel(); EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>(); 
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder() .documentSplitter(DocumentSplitters.recursive(500, 0)) .embeddingModel(embeddingModel) .embeddingStore(embeddingStore) .build(); log.info("Ingesting documents..."); List<Document> documents = guideUrls.stream().map(UrlDocumentLoader::load).toList(); ingestor.ingest(documents); log.info("Ingested {} documents", documents.size()); return ConversationalRetrievalChain.builder() .chatLanguageModel(OpenAiChatModel.builder() .apiKey(properties.apiKey()) .timeout(Duration.ofSeconds(30)) .build() ) .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel)) // .chatMemory() // you can override default chat memory // .promptTemplate() // you can override default prompt template .build(); } }
[ "dev.langchain4j.chain.ConversationalRetrievalChain.builder", "dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((2145, 2369), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2145, 2344), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2145, 2296), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2145, 2248), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2622, 3150), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2622, 2971), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2622, 2882), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2696, 2864), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2696, 2831), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2696, 2774), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')]
package com.example.interlang; import dev.langchain4j.memory.chat.MessageWindowChatMemory; import dev.langchain4j.model.openai.OpenAiChatModel; import dev.langchain4j.service.AiServices; import dev.langchain4j.service.SystemMessage; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; interface Assistant { @SystemMessage("You are a FHIR assistant AI. You can access various tools to access FHIR resources to chat about a patient.") String chat(String userMessage); } @Service public class AssistantService implements Assistant { private final Assistant assistant; private final FhirTools fhirTools; public AssistantService(@Value("${fhir.server.url}") String fhirServerUrl, @Value("${fhir.api.key}") String fhirApiKey, @Value("${openai.api.key}") String openAiApiKey) { this.fhirTools = new FhirTools(fhirServerUrl, fhirApiKey); OpenAiChatModel chatModel = OpenAiChatModel.builder() .apiKey(openAiApiKey) .modelName("gpt-4") .build(); this.assistant = AiServices.builder(Assistant.class) .chatLanguageModel(chatModel) .tools(this.fhirTools) .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) .build(); } @Override public String chat(String userMessage) { return this.assistant.chat(userMessage); } }
[ "dev.langchain4j.service.AiServices.builder", "dev.langchain4j.model.openai.OpenAiChatModel.builder" ]
[((979, 1103), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1078), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((979, 1042), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1131, 1349), 'dev.langchain4j.service.AiServices.builder'), ((1131, 1324), 'dev.langchain4j.service.AiServices.builder'), ((1131, 1251), 'dev.langchain4j.service.AiServices.builder'), ((1131, 1212), 'dev.langchain4j.service.AiServices.builder')]
package me.nzuguem.bot.configurations.clients; import dev.langchain4j.data.document.Document; import dev.langchain4j.data.document.source.UrlSource; import jakarta.ws.rs.GET; import jakarta.ws.rs.Path; import jakarta.ws.rs.PathParam; import jakarta.ws.rs.Produces; import jakarta.ws.rs.core.MediaType; import org.eclipse.microprofile.config.ConfigProvider; import org.eclipse.microprofile.rest.client.inject.RegisterRestClient; import java.util.ArrayList; import java.util.List; @Path("") @RegisterRestClient(configKey = "quarkus-github-app-extension-doc-client") @Produces(MediaType.TEXT_PLAIN) public interface QuarkusGithubAppExtensionDocClient { List<String> FILE_NAMES = List.of( "commands.adoc", "create-github-app.adoc", "developer-reference.adoc", "index.adoc", "push-to-production.adoc", "register-github-app.adoc", "replay-ui.adoc", "testing.adoc", "includes/quarkus-github-app.adoc" ); @GET @Path("{fileName}") String getContent(@PathParam("fileName") String fileName); default Document getContentAsDocument(String fileName, String baseUri) { var content = this.getContent(fileName); var metadata = UrlSource.from("%s/%s".formatted(baseUri, fileName)) .metadata() .add("source", "quarkus-github-app-extension"); return new Document(content, metadata); } default List<Document> getContentAsDocument() { var documents = new ArrayList<Document>(); var baseUri = ConfigProvider.getConfig().getValue("quarkus.rest-client.quarkus-github-app-extension-doc-client.url", String.class); FILE_NAMES.forEach(fileName -> documents.add(getContentAsDocument(fileName, baseUri))); return documents; } }
[ "dev.langchain4j.data.document.source.UrlSource.from" ]
[((1196, 1339), 'dev.langchain4j.data.document.source.UrlSource.from'), ((1196, 1276), 'dev.langchain4j.data.document.source.UrlSource.from'), ((1525, 1641), 'org.eclipse.microprofile.config.ConfigProvider.getConfig')]
package org.agoncal.fascicle.langchain4j.accessing.qianfan; import dev.langchain4j.model.qianfan.QianfanChatModel; // tag::adocSkip[] /** * @author Antonio Goncalves * http://www.antoniogoncalves.org * -- */ // end::adocSkip[] public class MusicianService { public static void main(String[] args) { MusicianService musicianService = new MusicianService(); musicianService.useQianfanChatModel(); } private static final String QIANFAN_KEY = System.getenv("AZURE_OPENAI_KEY"); private static final String QIANFAN_ENDPOINT = System.getenv("AZURE_OPENAI_ENDPOINT"); private static final String AZURE_OPENAI_DEPLOYMENT_NAME = System.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"); private static final String PROMPT = "When was the first Beatles album released?"; // ############################### // ### QIANFAN CHAT MODEL ### // ############################### public void useQianfanChatModel() { System.out.println("### useAzureOpenAiuseQianfanChatModelChatModel"); // tag::adocSnippet[] QianfanChatModel model = QianfanChatModel.builder() .apiKey(QIANFAN_KEY) .endpoint(QIANFAN_ENDPOINT) .temperature(0.3) .build(); // end::adocSnippet[] String completion = model.generate("When was the first Rolling Stones album released?"); System.out.println(completion); } public void useQianfanChatModelRequest() { System.out.println("### useQianfanChatModelRequest"); // tag::adocRequest[] QianfanChatModel model = QianfanChatModel.builder() .apiKey(QIANFAN_KEY) .endpoint(QIANFAN_ENDPOINT) .modelName(AZURE_OPENAI_DEPLOYMENT_NAME) .baseUrl("https://api.qianfanapi.com") .penaltyScore(0.0d) .secretKey("secretKey") .maxRetries(3) .topP(1.0d) .temperature(0.9) .logRequests(true) .logResponses(true) .build(); // end::adocRequest[] String completion = model.generate("When was the first Rolling Stones album released?"); System.out.println(completion); } }
[ "dev.langchain4j.model.qianfan.QianfanChatModel.builder" ]
[((1056, 1182), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1056, 1167), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1056, 1143), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1056, 1109), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1869), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1854), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1828), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1803), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1779), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1761), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1740), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1710), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1684), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1639), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1592), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder'), ((1505, 1558), 'dev.langchain4j.model.qianfan.QianfanChatModel.builder')]