/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Feature processing for FFModel (feed-forward SmartSelection model).

#ifndef LIBTEXTCLASSIFIER_ANNOTATOR_FEATURE_PROCESSOR_H_
#define LIBTEXTCLASSIFIER_ANNOTATOR_FEATURE_PROCESSOR_H_

#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "annotator/cached-features.h"
#include "annotator/model_generated.h"
#include "annotator/token-feature-extractor.h"
#include "annotator/tokenizer.h"
#include "annotator/types.h"
#include "utils/base/integral_types.h"
#include "utils/base/logging.h"
#include "utils/utf8/unicodetext.h"
#include "utils/utf8/unilib.h"

namespace libtextclassifier3 {

constexpr int kInvalidLabel = -1;

namespace internal {

TokenFeatureExtractorOptions BuildTokenFeatureExtractorOptions(
    const FeatureProcessorOptions* options);

// Splits tokens that contain the selection boundary inside them.
// E.g. "foo{bar}@google.com" -> "foo", "bar", "@google.com"
void SplitTokensOnSelectionBoundaries(CodepointSpan selection,
                                      std::vector<Token>* tokens);

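// A minimal call sketch (illustrative only; the Token(value, start, end)
// constructor from annotator/types.h and the concrete spans are assumptions
// made for this example):
//
//   std::vector<Token> tokens = {Token("foobar@google.com", 0, 17)};
//   SplitTokensOnSelectionBoundaries(/*selection=*/{3, 6}, &tokens);
//   // tokens is now {"foo"[0,3), "bar"[3,6), "@google.com"[6,17)}.
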
// Returns the index of the token that corresponds to the codepoint span.
int CenterTokenFromClick(CodepointSpan span, const std::vector<Token>& tokens);

// Returns the index of the token that corresponds to the middle of the
// codepoint span.
int CenterTokenFromMiddleOfSelection(
    CodepointSpan span, const std::vector<Token>& selectable_tokens);

// Strips tokens from the tokens vector that are not used for feature
// extraction because they are out of scope, or pads the vector so that there
// are enough tokens in the required context_size for all inferences with a
// click in relative_click_span.
void StripOrPadTokens(TokenSpan relative_click_span, int context_size,
                      std::vector<Token>* tokens, int* click_pos);

}  // namespace internal

// Converts a codepoint span to a token span in the given list of tokens.
// If snap_boundaries_to_containing_tokens is set to true, it is enough for a
// token to overlap with the codepoint range to be considered part of it.
// Otherwise it must be fully included in the range.
TokenSpan CodepointSpanToTokenSpan(
    const std::vector<Token>& selectable_tokens, CodepointSpan codepoint_span,
    bool snap_boundaries_to_containing_tokens = false);

// Converts a token span to a codepoint span in the given list of tokens.
CodepointSpan TokenSpanToCodepointSpan(
    const std::vector<Token>& selectable_tokens, TokenSpan token_span);
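
// Worked example (illustrative values): for tokens {"Hello"[0,5),
// "world"[6,11)} built from "Hello world", the codepoint span {6, 11} maps to
// the token span {1, 2}, and TokenSpanToCodepointSpan maps {1, 2} back to
// {6, 11}. With snap_boundaries_to_containing_tokens=true, the partial
// codepoint span {8, 11} ("rld") would also map to {1, 2}, because overlapping
// a token is enough in that mode.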

// Takes care of preparing features for the span prediction model.
class FeatureProcessor {
 public:
  // A cache mapping codepoint spans to embedded token features. An instance
  // can be provided to multiple calls to ExtractFeatures() operating on the
  // same context (the same codepoint spans corresponding to the same tokens),
  // as an optimization. Note that the tokenizations do not have to be
  // identical.
  typedef std::map<CodepointSpan, std::vector<float>> EmbeddingCache;

  FeatureProcessor(const FeatureProcessorOptions* options,
                   const UniLib* unilib)
      : unilib_(unilib),
        feature_extractor_(internal::BuildTokenFeatureExtractorOptions(options),
                           *unilib_),
        options_(options),
        tokenizer_(
            options->tokenization_codepoint_config() != nullptr
                ? Tokenizer({options->tokenization_codepoint_config()->begin(),
                             options->tokenization_codepoint_config()->end()},
                            options->tokenize_on_script_change())
                : Tokenizer({}, /*split_on_script_change=*/false)) {
    MakeLabelMaps();
    if (options->supported_codepoint_ranges() != nullptr) {
      PrepareCodepointRanges({options->supported_codepoint_ranges()->begin(),
                              options->supported_codepoint_ranges()->end()},
                             &supported_codepoint_ranges_);
    }
    if (options->internal_tokenizer_codepoint_ranges() != nullptr) {
      PrepareCodepointRanges(
          {options->internal_tokenizer_codepoint_ranges()->begin(),
           options->internal_tokenizer_codepoint_ranges()->end()},
          &internal_tokenizer_codepoint_ranges_);
    }
    PrepareIgnoredSpanBoundaryCodepoints();
  }

  // Tokenizes the input string using the selected tokenization method.
  std::vector<Token> Tokenize(const std::string& text) const;

  // Same as above but takes UnicodeText.
  std::vector<Token> Tokenize(const UnicodeText& text_unicode) const;
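
  // A usage sketch (hedged: the `model` and `unilib` objects are assumed to
  // exist; a FeatureProcessorOptions table would typically come from the Model
  // flatbuffer, e.g. model->selection_feature_options()):
  //
  //   FeatureProcessor processor(model->selection_feature_options(), &unilib);
  //   std::vector<Token> tokens = processor.Tokenize("Meet me at 6pm today");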

  // Converts a label into a token span.
  bool LabelToTokenSpan(int label, TokenSpan* token_span) const;

  // Gets the total number of selection labels.
  int GetSelectionLabelCount() const { return label_to_selection_.size(); }

  // Gets the string value for the given collection label.
  std::string LabelToCollection(int label) const;

  // Gets the total number of collections of the model.
  int NumCollections() const { return collection_to_label_.size(); }

  // Gets the name of the default collection.
  std::string GetDefaultCollection() const;

  const FeatureProcessorOptions* GetOptions() const { return options_; }

  // Retokenizes the context and input span, and finds the click position.
  // Depending on the options, might modify tokens (split them or remove them).
  void RetokenizeAndFindClick(const std::string& context,
                              CodepointSpan input_span,
                              bool only_use_line_with_click,
                              std::vector<Token>* tokens, int* click_pos) const;

  // Same as above but takes UnicodeText.
  void RetokenizeAndFindClick(const UnicodeText& context_unicode,
                              CodepointSpan input_span,
                              bool only_use_line_with_click,
                              std::vector<Token>* tokens, int* click_pos) const;

  // Returns whether the token span has enough supported codepoints (as defined
  // in the model config) for the model to run; if it returns false, the model
  // should not be run.
  bool HasEnoughSupportedCodepoints(const std::vector<Token>& tokens,
                                    TokenSpan token_span) const;

  // Extracts features as a CachedFeatures object that can be used for repeated
  // inference over token spans in the given context.
  bool ExtractFeatures(const std::vector<Token>& tokens, TokenSpan token_span,
                       CodepointSpan selection_span_for_feature,
                       const EmbeddingExecutor* embedding_executor,
                       EmbeddingCache* embedding_cache, int feature_vector_size,
                       std::unique_ptr<CachedFeatures>* cached_features) const;
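
  // A typical end-to-end flow (illustrative sketch only; the `context` string,
  // the spans, the `embedding_executor` instance, and the use of
  // EmbeddingSize() + DenseFeaturesCount() as the feature vector size are
  // assumptions made for this example, not a prescribed contract):
  //
  //   std::vector<Token> tokens;
  //   int click_pos;
  //   processor.RetokenizeAndFindClick(context, /*input_span=*/{25, 30},
  //                                    /*only_use_line_with_click=*/true,
  //                                    &tokens, &click_pos);
  //   const TokenSpan token_span = {0, static_cast<int>(tokens.size())};
  //   if (processor.HasEnoughSupportedCodepoints(tokens, token_span)) {
  //     FeatureProcessor::EmbeddingCache embedding_cache;
  //     std::unique_ptr<CachedFeatures> cached_features;
  //     processor.ExtractFeatures(
  //         tokens, token_span, /*selection_span_for_feature=*/{25, 30},
  //         embedding_executor, &embedding_cache,
  //         processor.EmbeddingSize() + processor.DenseFeaturesCount(),
  //         &cached_features);
  //   }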

  // Fills selection_label_spans with CodepointSpans that correspond to the
  // selection labels. The CodepointSpans are based on the codepoint ranges of
  // given tokens.
  bool SelectionLabelSpans(
      VectorSpan<Token> tokens,
      std::vector<CodepointSpan>* selection_label_spans) const;

  int DenseFeaturesCount() const {
    return feature_extractor_.DenseFeaturesCount();
  }

  int EmbeddingSize() const { return options_->embedding_size(); }

  // Splits the context into several segments.
  std::vector<UnicodeTextRange> SplitContext(
      const UnicodeText& context_unicode) const;

  // Strips boundary codepoints from the span in context and returns the new
  // start and end indices. If the span consists entirely of boundary
  // codepoints, the first index of the span is returned for both indices.
  CodepointSpan StripBoundaryCodepoints(const std::string& context,
                                        CodepointSpan span) const;

  // Same as above but takes UnicodeText.
  CodepointSpan StripBoundaryCodepoints(const UnicodeText& context_unicode,
                                        CodepointSpan span) const;
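
  // Example (illustrative; assumes '(' and ')' are among the configured
  // ignored span boundary codepoints): for the context "Call (650) 555-0199"
  // and the span {5, 10} covering "(650)", the stripped span would be {6, 9},
  // covering just "650".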

 protected:
  // Represents a codepoint range [start, end).
  struct CodepointRange {
    int32 start;
    int32 end;

    CodepointRange(int32 arg_start, int32 arg_end)
        : start(arg_start), end(arg_end) {}
  };

  // Returns the class id corresponding to the given string collection
  // identifier. There is a catch-all class id that the function returns for
  // unknown collections.
  int CollectionToLabel(const std::string& collection) const;

  // Prepares mapping from collection names to labels.
  void MakeLabelMaps();

  // Gets the number of spannable tokens for the model.
  //
  // Spannable tokens are those tokens of the context over which the model
  // predicts selection spans (i.e., there is a 1:1 correspondence between the
  // output classes of the model and each of the spannable tokens).
  int GetNumContextTokens() const { return options_->context_size() * 2 + 1; }
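  // For example, with context_size() == 2 this is 5 tokens: the click token
  // plus two tokens on each side.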

  // Converts a label into a span of codepoint indices corresponding to it,
  // given output_tokens.
  bool LabelToSpan(int label, const VectorSpan<Token>& output_tokens,
                   CodepointSpan* span) const;

  // Converts a span to the corresponding label, given output_tokens.
  bool SpanToLabel(const std::pair<CodepointIndex, CodepointIndex>& span,
                   const std::vector<Token>& output_tokens, int* label) const;

  // Converts a token span to the corresponding label.
  int TokenSpanToLabel(const std::pair<TokenIndex, TokenIndex>& span) const;

  void PrepareCodepointRanges(
      const std::vector<const FeatureProcessorOptions_::CodepointRange*>&
          codepoint_ranges,
      std::vector<CodepointRange>* prepared_codepoint_ranges);

  // Returns the ratio of supported codepoints to the total number of
  // codepoints in the given token span.
  float SupportedCodepointsRatio(const TokenSpan& token_span,
                                 const std::vector<Token>& tokens) const;

  // Returns true if the given codepoint is covered by the given sorted vector
  // of codepoint ranges.
  bool IsCodepointInRanges(
      int codepoint, const std::vector<CodepointRange>& codepoint_ranges) const;

  void PrepareIgnoredSpanBoundaryCodepoints();

  // Counts the number of span boundary codepoints. If count_from_beginning is
  // true, the counting starts at the span_start iterator (inclusive) and ends
  // at most at span_end (exclusive). If count_from_beginning is false, the
  // counting starts from span_end (exclusive) and ends at span_start
  // (inclusive).
  int CountIgnoredSpanBoundaryCodepoints(
      const UnicodeText::const_iterator& span_start,
      const UnicodeText::const_iterator& span_end,
      bool count_from_beginning) const;

  // Finds the center token index in the tokens vector, using the method
  // defined in options_.
  int FindCenterToken(CodepointSpan span,
                      const std::vector<Token>& tokens) const;

  // Tokenizes the input text using the ICU tokenizer.
  bool ICUTokenize(const UnicodeText& context_unicode,
                   std::vector<Token>* result) const;

  // Takes the result of ICU tokenization and retokenizes stretches of tokens
  // made of a specific subset of characters using the internal tokenizer.
  void InternalRetokenize(const UnicodeText& unicode_text,
                          std::vector<Token>* tokens) const;

  // Tokenizes a substring of the unicode string, appending the resulting
  // tokens to the output vector. The resulting tokens have bounds relative to
  // the full string. Does nothing if the start of the span is negative.
  void TokenizeSubstring(const UnicodeText& unicode_text, CodepointSpan span,
                         std::vector<Token>* result) const;

  // Removes from tokens all tokens that are not on the line (as defined by
  // calling SplitContext on the context) to which span points.
  void StripTokensFromOtherLines(const std::string& context, CodepointSpan span,
                                 std::vector<Token>* tokens) const;

  // Same as above but takes UnicodeText.
  void StripTokensFromOtherLines(const UnicodeText& context_unicode,
                                 CodepointSpan span,
                                 std::vector<Token>* tokens) const;

  // Extracts the features of a token and appends them to the output vector.
  // Uses the embedding cache to avoid re-extracting and re-embedding the
  // sparse features for the same token.
  bool AppendTokenFeaturesWithCache(const Token& token,
                                    CodepointSpan selection_span_for_feature,
                                    const EmbeddingExecutor* embedding_executor,
                                    EmbeddingCache* embedding_cache,
                                    std::vector<float>* output_features) const;

 private:
  const UniLib* unilib_;

 protected:
  const TokenFeatureExtractor feature_extractor_;

  // Codepoint ranges that define what codepoints are supported by the model.
  // NOTE: Must be sorted.
  std::vector<CodepointRange> supported_codepoint_ranges_;

  // Codepoint ranges that define which tokens (consisting of which codepoints)
  // should be re-tokenized with the internal tokenizer in the mixed
  // tokenization mode.
  // NOTE: Must be sorted.
  std::vector<CodepointRange> internal_tokenizer_codepoint_ranges_;

 private:
  // Set of codepoints that will be stripped from the beginning and end of
  // predicted spans.
  std::set<int32> ignored_span_boundary_codepoints_;

  const FeatureProcessorOptions* const options_;

  // Mapping between token selection spans and label ids.
  std::map<TokenSpan, int> selection_to_label_;
  std::vector<TokenSpan> label_to_selection_;

  // Mapping between collections and labels.
  std::map<std::string, int> collection_to_label_;

  Tokenizer tokenizer_;
};

}  // namespace libtextclassifier3

#endif  // LIBTEXTCLASSIFIER_ANNOTATOR_FEATURE_PROCESSOR_H_