From 47f85a2036fe804a632fcffb211defafef484fa1 Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Wed, 9 Oct 2024 11:16:08 +0100
Subject: [PATCH] add whitespace tokenizer docs

Signed-off-by: Anton Rubin
---
 _analyzers/tokenizers/index.md      |   2 +-
 _analyzers/tokenizers/whitespace.md | 105 ++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+), 1 deletion(-)
 create mode 100644 _analyzers/tokenizers/whitespace.md

diff --git a/_analyzers/tokenizers/index.md b/_analyzers/tokenizers/index.md
index d401851f60..1abc5ee7ff 100644
--- a/_analyzers/tokenizers/index.md
+++ b/_analyzers/tokenizers/index.md
@@ -2,7 +2,7 @@
 layout: default
 title: Tokenizers
 nav_order: 60
-has_children: false
+has_children: true
 has_toc: false
 ---

diff --git a/_analyzers/tokenizers/whitespace.md b/_analyzers/tokenizers/whitespace.md
new file mode 100644
index 0000000000..12b7b798ae
--- /dev/null
+++ b/_analyzers/tokenizers/whitespace.md
@@ -0,0 +1,105 @@
---
layout: default
title: Whitespace tokenizer
parent: Tokenizers
nav_order: 160
---

# Whitespace tokenizer

The `whitespace` tokenizer splits text only on whitespace characters, such as spaces, tabs, and newlines. It treats each whitespace-separated word as a token and performs no additional analysis or normalization, such as lowercasing or punctuation removal.

## Example usage

The following example request creates a new index named `my_index` and configures an analyzer with a `whitespace` tokenizer:

```json
PUT /my_index
{
  "settings": {
    "analysis": {
      "tokenizer": {
        "whitespace_tokenizer": {
          "type": "whitespace"
        }
      },
      "analyzer": {
        "my_whitespace_analyzer": {
          "type": "custom",
          "tokenizer": "whitespace_tokenizer"
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "my_whitespace_analyzer"
      }
    }
  }
}
```
{% include copy-curl.html %}

## Generated tokens

Use the following request to examine the tokens generated by the analyzer:

```json
POST /my_index/_analyze
{
  "analyzer": "my_whitespace_analyzer",
  "text": "OpenSearch is fast! Really fast."
}
```
{% include copy-curl.html %}

The response contains the generated tokens:

```json
{
  "tokens": [
    {
      "token": "OpenSearch",
      "start_offset": 0,
      "end_offset": 10,
      "type": "word",
      "position": 0
    },
    {
      "token": "is",
      "start_offset": 11,
      "end_offset": 13,
      "type": "word",
      "position": 1
    },
    {
      "token": "fast!",
      "start_offset": 14,
      "end_offset": 19,
      "type": "word",
      "position": 2
    },
    {
      "token": "Really",
      "start_offset": 20,
      "end_offset": 26,
      "type": "word",
      "position": 3
    },
    {
      "token": "fast.",
      "start_offset": 27,
      "end_offset": 32,
      "type": "word",
      "position": 4
    }
  ]
}
```

## Configuration

The `whitespace` tokenizer can be configured with a single optional parameter, `max_token_length` (integer), which sets the maximum length of a generated token. If a token exceeds this length, it is split into multiple tokens at `max_token_length` intervals. Default is `255`.
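
The following request is a minimal sketch of how `max_token_length` could be set; the index name `my_short_token_index`, the tokenizer and analyzer names, and the value `5` are illustrative only:

```json
PUT /my_short_token_index
{
  "settings": {
    "analysis": {
      "tokenizer": {
        "short_whitespace_tokenizer": {
          "type": "whitespace",
          "max_token_length": 5
        }
      },
      "analyzer": {
        "short_whitespace_analyzer": {
          "type": "custom",
          "tokenizer": "short_whitespace_tokenizer"
        }
      }
    }
  }
}
```
{% include copy-curl.html %}

With this configuration, analyzing the text `OpenSearch` with `short_whitespace_analyzer` should produce the tokens `OpenS` and `earch`, because the original 10-character token exceeds the 5-character limit.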