-
Notifications
You must be signed in to change notification settings - Fork 25.6k
[ML] Introduce InferenceString wrapper object #137711
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,53 @@ | ||
| /* | ||
| * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one | ||
| * or more contributor license agreements. Licensed under the "Elastic License | ||
| * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side | ||
| * Public License v 1"; you may not use this file except in compliance with, at | ||
| * your election, the "Elastic License 2.0", the "GNU Affero General Public | ||
| * License v3.0 only", or the "Server Side Public License, v 1". | ||
| */ | ||
|
|
||
| package org.elasticsearch.inference; | ||
|
|
||
| import java.util.EnumSet; | ||
| import java.util.List; | ||
| import java.util.Objects; | ||
| import java.util.stream.Collectors; | ||
|
|
||
| /** | ||
| * This class represents a String which may be raw text, or the String representation of some other data such as an image in base64 | ||
| */ | ||
| public record InferenceString(String value, DataType dataType) { | ||
| /** | ||
| * Describes the type of data represented by an {@link InferenceString} | ||
| */ | ||
| public enum DataType { | ||
| TEXT, | ||
| IMAGE_BASE64 | ||
| } | ||
|
|
||
| private static final EnumSet<DataType> IMAGE_TYPES = EnumSet.of(DataType.IMAGE_BASE64); | ||
|
|
||
| /** | ||
| * Constructs an {@link InferenceString} with the given value and {@link DataType} | ||
| * @param value the String value | ||
| * @param dataType the type of data that the String represents | ||
| */ | ||
| public InferenceString(String value, DataType dataType) { | ||
| this.value = Objects.requireNonNull(value); | ||
| this.dataType = Objects.requireNonNull(dataType); | ||
| } | ||
|
|
||
| public boolean isImage() { | ||
| return IMAGE_TYPES.contains(dataType); | ||
| } | ||
|
|
||
| public boolean isText() { | ||
| return DataType.TEXT.equals(dataType); | ||
| } | ||
|
|
||
| public static List<String> toStringList(List<InferenceString> inferenceStrings) { | ||
| return inferenceStrings.stream().map(InferenceString::value).collect(Collectors.toList()); | ||
| } | ||
|
|
||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -15,6 +15,7 @@ | |
| import org.elasticsearch.inference.ChunkingSettings; | ||
| import org.elasticsearch.inference.ChunkingStrategy; | ||
| import org.elasticsearch.inference.InferenceServiceResults; | ||
| import org.elasticsearch.inference.InferenceString; | ||
| import org.elasticsearch.rest.RestStatus; | ||
| import org.elasticsearch.xpack.core.inference.chunking.Chunker.ChunkOffset; | ||
| import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceEmbedding; | ||
|
|
@@ -36,22 +37,26 @@ | |
| * chunks. Multiple inputs may be fit into a single batch or | ||
| * a single large input that has been chunked may spread over | ||
| * multiple batches. | ||
| * | ||
| * <p> | ||
| * The final aspect is to gather the responses from the batch | ||
| * processing and map the results back to the original element | ||
| * in the input list. | ||
| */ | ||
| public class EmbeddingRequestChunker<E extends EmbeddingResults.Embedding<E>> { | ||
|
|
||
| // Visible for testing | ||
| record Request(int inputIndex, int chunkIndex, ChunkOffset chunk, String input) { | ||
| public String chunkText() { | ||
| return input.substring(chunk.start(), chunk.end()); | ||
| record Request(int inputIndex, int chunkIndex, ChunkOffset chunk, InferenceString input) { | ||
| public InferenceString chunkText() { | ||
| if (chunk.start() == 0 && chunk.end() == input.value().length()) { | ||
| return input; | ||
| } else { | ||
| return new InferenceString(input.value().substring(chunk.start(), chunk.end()), input.dataType()); | ||
| } | ||
| } | ||
| } | ||
|
|
||
| public record BatchRequest(List<Request> requests) { | ||
| public Supplier<List<String>> inputs() { | ||
| public Supplier<List<InferenceString>> inputs() { | ||
| return () -> requests.stream().map(Request::chunkText).collect(Collectors.toList()); | ||
| } | ||
| } | ||
|
|
@@ -107,21 +112,29 @@ public EmbeddingRequestChunker( | |
|
|
||
| List<Request> allRequests = new ArrayList<>(); | ||
| for (int inputIndex = 0; inputIndex < inputs.size(); inputIndex++) { | ||
| ChunkingSettings chunkingSettings = inputs.get(inputIndex).chunkingSettings(); | ||
| ChunkInferenceInput chunkInferenceInput = inputs.get(inputIndex); | ||
| ChunkingSettings chunkingSettings = chunkInferenceInput.chunkingSettings(); | ||
| if (chunkingSettings == null) { | ||
| chunkingSettings = defaultChunkingSettings; | ||
| } | ||
| Chunker chunker = chunkers.getOrDefault(chunkingSettings.getChunkingStrategy(), defaultChunker); | ||
| String inputString = inputs.get(inputIndex).input(); | ||
| List<ChunkOffset> chunks = chunker.chunk(inputString, chunkingSettings); | ||
| Chunker chunker; | ||
| if (chunkInferenceInput.input().isText()) { | ||
| chunker = chunkers.getOrDefault(chunkingSettings.getChunkingStrategy(), defaultChunker); | ||
| } else { | ||
| // Do not chunk non-text inputs | ||
| chunker = NoopChunker.INSTANCE; | ||
| chunkingSettings = NoneChunkingSettings.INSTANCE; | ||
| } | ||
| InferenceString inputString = chunkInferenceInput.input(); | ||
| List<ChunkOffset> chunks = chunker.chunk(inputString.value(), chunkingSettings); | ||
| int resultCount = Math.min(chunks.size(), MAX_CHUNKS); | ||
| resultEmbeddings.add(new AtomicReferenceArray<>(resultCount)); | ||
| resultOffsetStarts.add(new ArrayList<>(resultCount)); | ||
| resultOffsetEnds.add(new ArrayList<>(resultCount)); | ||
|
|
||
| for (int chunkIndex = 0; chunkIndex < chunks.size(); chunkIndex++) { | ||
| // If the number of chunks is larger than the maximum allowed value, | ||
| // scale the indices to [0, MAX) with similar number of original | ||
| // scale the indices to [0, MAX) with similar number of original | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just to confirm, the change here is because
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Oh, my mistake, I thought this was just a typo rather than indicating inclusive/exclusive. I learned something new today! |
||
| // chunks in the final chunks. | ||
| int targetChunkIndex = chunks.size() <= MAX_CHUNKS ? chunkIndex : chunkIndex * MAX_CHUNKS / chunks.size(); | ||
| if (resultOffsetStarts.getLast().size() <= targetChunkIndex) { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should we filter out DataType.IMAGE_BASE64 items?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't think we should just filter out non-text inputs, because if any manage to make it into one of the two places we call this method, then there's a problem somewhere. Maybe an assert like in
EmbeddingsInput.getTextInputs()just for safety? The two classes where this method is called (inElasticsearchInternalServiceandSageMakerService) don't useEmbeddingsInput, which is why there's a slightly different flow for them.