diff --git a/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc new file mode 100644 index 000000000..67b3c97a4 --- /dev/null +++ b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "jinaai-index", + mappings: { + properties: { + content: { + type: "semantic_text", + inference_id: "jinaai-embeddings", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc new file mode 100644 index 000000000..12ea79855 --- /dev/null +++ b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc new file mode 100644 index 000000000..d5144150f --- /dev/null +++ b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: false, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc new file mode 100644 index 000000000..8a6e40755 --- /dev/null +++ b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "jinaai-index", + operations: [ + { + index: { + _index: "jinaai-index", + _id: "1", + }, + }, + { + content: + "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades.", + }, + { + index: { + _index: "jinaai-index", + _id: "2", + }, + }, + { + content: + "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. 
", + }, + { + index: { + _index: "jinaai-index", + _id: "3", + }, + }, + { + content: + "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists.", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc new file mode 100644 index 000000000..b0b47f121 --- /dev/null +++ b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "jinaai-rerank", + inference_config: { + service: "jinaai", + service_settings: { + api_key: "", + model_id: "jina-reranker-v2-base-multilingual", + }, + task_settings: { + top_n: 10, + return_documents: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc new file mode 100644 index 000000000..ee14809f6 --- /dev/null +++ b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: true, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc new file mode 100644 index 000000000..dad59f975 --- /dev/null +++ b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, + }, + }, + field: "content", + rank_window_size: 100, + inference_id: "jinaai-rerank", + inference_text: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc new file mode 100644 index 000000000..339435f69 --- /dev/null +++ b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "jinaai-embeddings", + inference_config: { + service: "jinaai", + service_settings: { + model_id: "jina-embeddings-v3", + api_key: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 31a927b83..840060bdf 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -609,9 +609,9 @@ Set to 
all or any positive integer up to the total number of shards in the index [discrete] === info Get cluster info. -Returns basic information about the cluster. +Get basic build, version, and cluster information. -{ref}/index.html[Endpoint documentation] +{ref}/rest-api-root.html[Endpoint documentation] [source,ts] ---- client.info() @@ -2768,7 +2768,6 @@ client.cluster.allocationExplain({ ... }) [discrete] ==== delete_component_template Delete component templates. -Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. {ref}/indices-component-template.html[Endpoint documentation] @@ -2835,7 +2834,7 @@ Defaults to false, which means information is retrieved from the master node. [discrete] ==== get_component_template Get component templates. -Retrieves information about component templates. +Get information about component templates. {ref}/indices-component-template.html[Endpoint documentation] [source,ts] @@ -2998,7 +2997,6 @@ is satisfied, the request fails and returns an error. [discrete] ==== put_component_template Create or update a component template. -Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. @@ -3014,6 +3012,11 @@ Changes to component templates do not affect existing indices, including a strea You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. +**Applying component templates** + +You cannot directly apply a component template to a data stream or index. +To be applied, a component template must be included in an index template's `composed_of` list. + {ref}/indices-component-template.html[Endpoint documentation] [source,ts] ---- @@ -3034,7 +3037,7 @@ If you don’t use Elastic Agent and want to disable all built-in component and This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. ** *`_meta` (Optional, Record)*: Optional user metadata about the component template. -May have any contents. This map is not automatically generated by Elasticsearch. +It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. ** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template @@ -3487,14 +3490,31 @@ client.connector.syncJobPost({ id }) [discrete] ==== sync_job_update_stats -Updates the stats fields in the connector sync job document. +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. 
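As an illustration, a minimal sketch of a stats update call; the sync job ID and the counts below are placeholders, not values from a real deployment:

[source,js]
----
// All values here are hypothetical, for illustration only
const response = await client.connector.syncJobUpdateStats({
  connector_sync_job_id: "my-connector-sync-job-id",
  deleted_document_count: 10,
  indexed_document_count: 20,
  indexed_document_volume: 1000,
});
console.log(response);
----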
{ref}/set-connector-sync-job-stats-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobUpdateStats() +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`deleted_document_count` (number)*: The number of documents the sync job deleted. +** *`indexed_document_count` (number)*: The number of documents the sync job indexed. +** *`indexed_document_volume` (number)*: The total size of the data (in MiB) the sync job indexed. +** *`last_seen` (Optional, string | -1 | 0)*: The timestamp to use in the `last_seen` property for the connector sync job. +** *`metadata` (Optional, Record)*: The connector-specific metadata. +** *`total_document_count` (Optional, number)*: The total number of documents in the target index after the sync job finished. [discrete] ==== update_active_filtering @@ -3775,11 +3795,10 @@ client.connector.updateStatus({ connector_id, status }) [discrete] ==== delete_dangling_index Delete a dangling index. - If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. -{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-index-delete.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss }) @@ -3801,7 +3820,7 @@ Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. -{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-index-import.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) @@ -3826,7 +3845,7 @@ For example, this can happen if you delete more than `cluster.indices.tombstones Use this API to list dangling indices, which you can then import or delete. -{ref}/modules-gateway-dangling-indices.html[Endpoint documentation] +{ref}/dangling-indices-list.html[Endpoint documentation] [source,ts] ---- client.danglingIndices.listDanglingIndices() @@ -4449,8 +4468,8 @@ client.ilm.moveToStep({ index, current_step, next_step }) * *Request (object):* ** *`index` (string)*: The name of the index whose lifecycle step is to change -** *`current_step` ({ action, name, phase })* -** *`next_step` ({ action, name, phase })* +** *`current_step` ({ action, name, phase })*: The step that the index is expected to be in. +** *`next_step` ({ action, name, phase })*: The step that you want to run. [discrete] ==== put_lifecycle @@ -4527,8 +4546,8 @@ client.ilm.start({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)* -** *`timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] ==== stop @@ -4549,8 +4568,8 @@ client.ilm.stop({ ... 
}) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)* -** *`timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout [discrete] === indices @@ -4580,7 +4599,12 @@ client.indices.addBlock({ index, block }) [discrete] ==== analyze Get tokens from text analysis. -The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. +The analyze API performs analysis on a text string and returns the resulting tokens. + +Generating excessive amount of tokens may cause a node to run out of memory. +The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. +If more than this limit of tokens gets generated, an error occurs. +The `_analyze` endpoint without a specified index will always use `10000` as its limit. {ref}/indices-analyze.html[Endpoint documentation] [source,ts] @@ -4615,6 +4639,10 @@ Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. + {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] ---- @@ -4663,10 +4691,32 @@ Cloning works as follows: IMPORTANT: Indices can only be cloned if they meet the following requirements: +* The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. + +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. + +**Monitor the cloning process** + +The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. + +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. + +**Wait for active shards** + +Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. 
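For example, a sketch of a clone request that waits for all shard copies to start before returning; the index names are illustrative and the source index is assumed to already be read-only:

[source,js]
----
// "my_source_index" and "my_target_index" are placeholder names
const response = await client.indices.clone({
  index: "my_source_index",
  target: "my_target_index",
  wait_for_active_shards: "all",
});
console.log(response);
----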
+ {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] ---- @@ -4737,7 +4787,25 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create Create an index. -Creates a new index. +You can use the create index API to add a new index to an Elasticsearch cluster. +When creating an index, you can specify the following: + +* Settings for the index. +* Mappings for fields in the index. +* Index aliases + +**Wait for active shards** + +By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. +The index creation response will indicate what happened. +For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. +Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. +These values simply indicate whether the operation completed before the timeout. +If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. +If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + +You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. +Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. {ref}/indices-create-index.html[Endpoint documentation] [source,ts] @@ -4812,7 +4880,12 @@ Supports a list of values, such as `open,hidden`. [discrete] ==== delete Delete indices. -Deletes one or more indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. {ref}/indices-delete-index.html[Endpoint documentation] [source,ts] @@ -4845,7 +4918,7 @@ If no response is received before the timeout expires, the request fails and ret Delete an alias. Removes a data stream or index from an alias. -{ref}/indices-aliases.html[Endpoint documentation] +{ref}/indices-delete-alias.html[Endpoint documentation] [source,ts] ---- client.indices.deleteAlias({ index, name }) @@ -4926,7 +4999,7 @@ client.indices.deleteIndexTemplate({ name }) [discrete] ==== delete_template -Deletes a legacy index template. +Delete a legacy index template. {ref}/indices-delete-template-v1.html[Endpoint documentation] [source,ts] @@ -4952,6 +5025,10 @@ Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. 
+NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. +Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. +The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. + {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] ---- @@ -5004,7 +5081,7 @@ client.indices.downsample({ index, target_index }) [discrete] ==== exists Check indices. -Checks if one or more indices, index aliases, or data streams exist. +Check if one or more indices, index aliases, or data streams exist. {ref}/indices-exists.html[Endpoint documentation] [source,ts] @@ -5076,7 +5153,10 @@ client.indices.existsIndexTemplate({ name }) [discrete] ==== exists_template Check existence of index templates. -Returns information about whether a particular index template exists. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. {ref}/indices-template-exists-v1.html[Endpoint documentation] [source,ts] @@ -5088,10 +5168,13 @@ client.indices.existsTemplate({ name }) ==== Arguments * *Request (object):* -** *`name` (string | string[])*: The comma separated names of the index templates -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`name` (string | string[])*: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`flat_settings` (Optional, boolean)*: Indicates whether to use a flat format for the response. +** *`local` (Optional, boolean)*: Indicates whether to get information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== explain_data_lifecycle @@ -5119,6 +5202,9 @@ Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. + {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] ---- @@ -5199,6 +5285,46 @@ But force merge can cause very large (greater than 5 GB) segments to be produced So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. 
If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. +**Blocks during a force merge** + +Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). +If the client connection is lost before completion then the force merge process will continue in the background. +Any new requests to force merge the same indices will also block until the ongoing force merge is complete. + +**Running force merge asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. +However, you can not cancel this task as the force merge task is not cancelable. +Elasticsearch creates a record of this task as a document at `_tasks/`. +When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. + +**Force merging multiple indices** + +You can force merge multiple indices with a single request by targeting: + +* One or more data streams that contain multiple backing indices +* Multiple indices +* One or more aliases +* All data streams and indices in a cluster + +Each targeted shard is force-merged separately using the force_merge threadpool. +By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. +If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel + +Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one. + +**Data streams and time-based indices** + +Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. +In these cases, each index only receives indexing traffic for a certain period of time. +Once an index receive no more writes, its shards can be force-merged to a single segment. +This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. +For example: + +``` +POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +``` + {ref}/indices-forcemerge.html[Endpoint documentation] [source,ts] ---- @@ -5221,7 +5347,7 @@ client.indices.forcemerge({ ... }) [discrete] ==== get Get index information. -Returns information about one or more indices. For data streams, the API returns information about the +Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. {ref}/indices-get-index.html[Endpoint documentation] @@ -5253,8 +5379,6 @@ such as open,hidden. ==== get_alias Get aliases. Retrieves information for one or more data stream or index aliases. - -{ref}/indices-aliases.html[Endpoint documentation] [source,ts] ---- client.indices.getAlias({ ... }) @@ -5303,6 +5427,18 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
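As a sketch, retrieving the lifecycle configuration of a hypothetical data stream with default values included in the response:

[source,js]
----
// "my-data-stream" is a placeholder name
const response = await client.indices.getDataLifecycle({
  name: "my-data-stream",
  include_defaults: true,
});
console.log(response);
----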
+[discrete] +==== get_data_lifecycle_stats +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. + +{ref}/data-streams-get-lifecycle-stats.html[Endpoint documentation] +[source,ts] +---- +client.indices.getDataLifecycleStats() +---- + + [discrete] ==== get_data_stream Get data streams. @@ -5332,6 +5468,8 @@ Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. + {ref}/indices-get-field-mapping.html[Endpoint documentation] [source,ts] ---- @@ -5343,6 +5481,7 @@ client.indices.getFieldMapping({ fields }) * *Request (object):* ** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). ** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -5359,7 +5498,7 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== get_index_template Get index templates. -Returns information about one or more index templates. +Get information about one or more index templates. {ref}/indices-get-template.html[Endpoint documentation] [source,ts] @@ -5380,7 +5519,6 @@ client.indices.getIndexTemplate({ ... }) [discrete] ==== get_mapping Get mapping definitions. -Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. {ref}/indices-get-mapping.html[Endpoint documentation] @@ -5410,8 +5548,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings Get index settings. -Returns setting information for one or more indices. For data streams, -returns setting information for the stream’s backing indices. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. {ref}/indices-get-settings.html[Endpoint documentation] [source,ts] @@ -5447,7 +5585,9 @@ error. [discrete] ==== get_template Get index templates. -Retrieves information about one or more index templates. +Get information about one or more index templates. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. {ref}/indices-get-template-v1.html[Endpoint documentation] [source,ts] @@ -5514,9 +5654,30 @@ client.indices.modifyDataStream({ actions }) [discrete] ==== open -Opens a closed index. +Open a closed index. For data streams, the API opens any closed backing indices. +A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. + +When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. 
+The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behavior can be turned off by using the `ignore_unavailable=true` parameter. + +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +This setting can also be changed with the cluster update settings API. + +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + +Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. + {ref}/indices-open-close.html[Endpoint documentation] [source,ts] ---- @@ -5649,6 +5810,32 @@ If no response is received before the timeout expires, the request fails and ret Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. +Index templates are applied during data stream or index creation. +For data streams, these settings and mappings are applied when the stream's backing indices are created. +Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. +Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. + +You can use C-style `/* *\/` block comments in index templates. +You can include comments anywhere in the request body, except before the opening curly bracket. + +**Multiple matching templates** + +If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. + +Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. + +**Composing aliases, mappings, and settings** + +When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. +Any mappings, settings, or aliases from the parent index template are merged in next. +Finally, any configuration on the index request itself is merged. +Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. +If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. +This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. +If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. +If an entry already exists with the same key, then it is overwritten by the new definition. 
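A minimal sketch of an index template that composes two hypothetical component templates, with a priority to disambiguate overlapping patterns:

[source,js]
----
// Template and component names are illustrative; the component
// templates are assumed to already exist
const response = await client.indices.putIndexTemplate({
  name: "my-index-template",
  index_patterns: ["my-index-*"],
  composed_of: ["my-settings-component", "my-mappings-component"],
  priority: 500,
});
console.log(response);
----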
+ {ref}/indices-put-template.html[Endpoint documentation] [source,ts] ---- @@ -5674,9 +5861,13 @@ If no priority is specified the template is treated as though it is of priority This number is not automatically generated by Elasticsearch. ** *`version` (Optional, number)*: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. +External systems can use these version numbers to simplify template management. +To unset a version, replace the template without specifying one. ** *`_meta` (Optional, Record)*: Optional user metadata about the index template. -May have any contents. -This map is not automatically generated by Elasticsearch. +It may have any contents. +It is not automatically generated or used by Elasticsearch. +This user-defined object is stored in the cluster state, so keeping it short is preferable +To unset the metadata, replace the template without specifying it. ** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. @@ -5692,10 +5883,35 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_mapping Update field mappings. -Adds new fields to an existing data stream or index. -You can also use this API to change the search settings of existing fields. +Add new fields to an existing data stream or index. +You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. +**Add multi-fields to an existing field** + +Multi-fields let you index the same field in different ways. +You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. +WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. +You can populate the new multi-field with the update by query API. + +**Change supported mapping parameters for an existing field** + +The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. +For example, you can use the update mapping API to update the `ignore_above` parameter. + +**Change the mapping of an existing field** + +Except for supported mapping parameters, you can't change the mapping or field type of an existing field. +Changing an existing field could invalidate data that's already indexed. + +If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. +If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. + +**Rename a field** + +Renaming a field would invalidate data already indexed under the old field name. +Instead, add an alias field to create an alternate field name. 
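For instance, a sketch that adds an alias field instead of renaming an existing field; the index and field names are illustrative:

[source,js]
----
// "user_id" is assumed to already exist in the index mapping
const response = await client.indices.putMapping({
  index: "my-index",
  properties: {
    user_identifier: {
      type: "alias",
      path: "user_id",
    },
  },
});
console.log(response);
----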
+ {ref}/indices-put-mapping.html[Endpoint documentation] [source,ts] ---- @@ -5742,8 +5958,21 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings Update index settings. -Changes dynamic index settings in real time. For data streams, index setting -changes are applied to all backing indices by default. +Changes dynamic index settings in real time. +For data streams, index setting changes are applied to all backing indices by default. + +To revert a setting to the default value, use a null value. +The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. +To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + +NOTE: You can only define new analyzers on closed indices. +To add an analyzer, you must close the index, define the analyzer, and reopen the index. +You cannot close the write index of a data stream. +To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. +Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. +This affects searches and any new data added to the stream after the rollover. +However, it does not affect the data stream's backing indices or their existing data. +To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. {ref}/indices-update-settings.html[Endpoint documentation] [source,ts] @@ -5792,6 +6021,15 @@ Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. +You can use C-style `/* *\/` block comments in index templates. +You can include comments anywhere in the request body, except before the opening curly bracket. + +**Indices matching multiple templates** + +Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. +The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. +NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. + {ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] ---- @@ -5815,6 +6053,7 @@ Templates with lower 'order' values are merged first. 
Templates with higher ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. ** *`version` (Optional, number)*: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. +To unset a version, replace the template without specifying one. ** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -5826,6 +6065,8 @@ Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. +All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. + Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. @@ -5866,6 +6107,17 @@ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. + +Refresh requests are synchronous and do not return a response until the refresh operation completes. + +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. + +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. + {ref}/indices-refresh.html[Endpoint documentation] [source,ts] ---- @@ -5938,6 +6190,18 @@ For each cluster in the index expression, information is returned about: * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. 
+For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. +Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. + +**Advantages of using this endpoint before a cross-cluster search** + +You may want to exclude a cluster or index from a search when: + +* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. +* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. +* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) +* A remote cluster is an older version that does not support the feature you want to use in your search. + {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] ---- @@ -5990,7 +6254,43 @@ For example, a request targeting `foo*,bar*` returns an error if an index starts [discrete] ==== rollover Roll over to a new index. -Creates a new index for a data stream or index alias. +TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. + +The rollover API creates a new index for a data stream or index alias. +The API behavior depends on the rollover target. + +**Roll over a data stream** + +If you roll over a data stream, the API creates a new write index for the stream. +The stream's previous write index becomes a regular backing index. +A rollover also increments the data stream's generation. + +**Roll over an index alias with a write index** + +TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. +Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. + +If an index alias points to multiple indices, one of the indices must be a write index. +The rollover API creates a new write index for the alias with `is_write_index` set to `true`. +The API also `sets is_write_index` to `false` for the previous write index. + +**Roll over an index alias with one index** + +If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. + +NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. + +**Increment index names for an alias** + +When you roll over an index alias, you can specify a name for the new index. +If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. +For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. +This number is always six characters and zero-padded, regardless of the previous index's name. 
+ +If you use an index alias for time series data, you can use date math in the index name to track the rollover date. +For example, you can create an alias that points to an index named ``. +If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. +If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. {ref}/indices-rollover-index.html[Endpoint documentation] [source,ts] @@ -6147,7 +6447,7 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== simulate_index_template Simulate an index. -Returns the index configuration that would be applied to the specified index from an existing index template. +Get the index configuration that would be applied to the specified index from an existing index template. {ref}/indices-simulate-index.html[Endpoint documentation] [source,ts] @@ -6166,7 +6466,7 @@ client.indices.simulateIndexTemplate({ name }) [discrete] ==== simulate_template Simulate an index template. -Returns the index configuration that would be applied by a particular index template. +Get the index configuration that would be applied by a particular index template. {ref}/indices-simulate-template.html[Endpoint documentation] [source,ts] @@ -6217,6 +6517,15 @@ Split an index into a new index with more primary shards. * The index must be read-only. * The cluster health status must be green. +You can do make an index read-only with the following request using the add index block API: + +``` +PUT /my_source_index/_block/write +``` + +The current write index on a data stream cannot be split. +In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. + The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. @@ -6504,7 +6813,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_ip_location_database -Deletes an IP location database configuration. +Delete IP geolocation database configurations. {ref}/delete-ip-location-database-api.html[Endpoint documentation] [source,ts] @@ -6516,10 +6825,13 @@ client.ingest.deleteIpLocationDatabase({ id }) ==== Arguments * *Request (object):* -** *`id` (string | string[])*: A list of IP location database configurations to delete -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`id` (string | string[])*: A list of IP location database configurations. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. 
+If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. [discrete] ==== delete_pipeline @@ -6578,7 +6890,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_ip_location_database -Returns information about one or more IP location database configurations. +Get IP geolocation database configurations. {ref}/get-ip-location-database-api.html[Endpoint documentation] [source,ts] @@ -6593,8 +6905,9 @@ client.ingest.getIpLocationDatabase({ ... }) ** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. [discrete] ==== get_pipeline @@ -6635,8 +6948,8 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database -Create or update GeoIP database configurations. -Create or update IP geolocation database configurations. +Create or update a GeoIP database configuration. +Refer to the create or update IP geolocation database configuration API. {ref}/put-geoip-database-api.html[Endpoint documentation] [source,ts] @@ -6658,7 +6971,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_ip_location_database -Returns information about one or more IP location database configurations. +Create or update an IP geolocation database configuration. {ref}/put-ip-location-database-api.html[Endpoint documentation] [source,ts] @@ -6670,11 +6983,14 @@ client.ingest.putIpLocationDatabase({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: ID of the database configuration to create or update. +** *`id` (string)*: The database configuration identifier. ** *`configuration` (Optional, { name, maxmind, ipinfo })* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. [discrete] ==== put_pipeline @@ -6864,8 +7180,8 @@ client.license.postStartTrial({ ... }) [discrete] ==== delete_pipeline Delete a Logstash pipeline. - Delete a pipeline that is used for Logstash Central Management. +If the request succeeds, you receive an empty response with an appropriate status code. 
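A sketch of deleting a pipeline by its identifier (the pipeline ID is a placeholder):

[source,js]
----
// "my_pipeline" is a placeholder pipeline ID
const response = await client.logstash.deletePipeline({
  id: "my_pipeline",
});
console.log(response);
----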
{ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] @@ -6882,7 +7198,6 @@ client.logstash.deletePipeline({ id }) [discrete] ==== get_pipeline Get Logstash pipelines. - Get pipelines that are used for Logstash Central Management. {ref}/logstash-api-get-pipeline.html[Endpoint documentation] @@ -6924,7 +7239,8 @@ client.logstash.putPipeline({ id }) Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. -TIP: This APIs is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. +TIP: This APIs is designed for indirect use by the Upgrade Assistant. +You are strongly recommended to use the Upgrade Assistant. {ref}/migration-api-deprecation.html[Endpoint documentation] [source,ts] @@ -6945,9 +7261,9 @@ Version upgrades sometimes require changes to how features store configuration i Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. -We strongly recommend you use the Upgrade Assistant. +You are strongly recommended to use the Upgrade Assistant. -{ref}/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/feature-migration-api.html[Endpoint documentation] [source,ts] ---- client.migration.getFeatureUpgradeStatus() @@ -6964,7 +7280,7 @@ Some functionality might be temporarily unavailable during the migration process TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. -{ref}/migration-api-feature-upgrade.html[Endpoint documentation] +{ref}/feature-migration-api.html[Endpoint documentation] [source,ts] ---- client.migration.postFeatureUpgrade() @@ -8032,8 +8348,8 @@ Currently, for NLP models, only a single value is allowed. [discrete] ==== info -Return ML defaults and limits. -Returns defaults and limits used by machine learning. +Get machine learning information. +Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be @@ -8204,6 +8520,11 @@ client.ml.putCalendarJob({ calendar_id, job_id }) Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. +By default, the query used in the source configuration is `{"match_all": {}}`. + +If the destination index does not exist, it is created automatically when you start the job. + +If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. {ref}/put-dfanalytics.html[Endpoint documentation] [source,ts] @@ -8280,6 +8601,8 @@ Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. +By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. 
+ When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. @@ -8368,6 +8691,7 @@ Up to 10000 items are allowed in each filter. ==== put_job Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. +If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. {ref}/ml-put-job.html[Endpoint documentation] [source,ts] @@ -9076,8 +9400,7 @@ client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) * *Request (object):* ** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. -All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). -** *`max_archive_version` (number)*: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) to be cleared from the archive. +** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive. [discrete] ==== get_repositories_metering_info @@ -9233,6 +9556,7 @@ If no response is received before the timeout expires, the request fails and ret ==== delete_rule Delete a query rule. Delete a query rule within a query ruleset. +This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. {ref}/delete-query-rule.html[Endpoint documentation] [source,ts] @@ -9250,6 +9574,8 @@ client.queryRules.deleteRule({ ruleset_id, rule_id }) [discrete] ==== delete_ruleset Delete a query ruleset. +Remove a query ruleset and its associated data. +This is a destructive action that is not recoverable. {ref}/delete-query-ruleset.html[Endpoint documentation] [source,ts] @@ -9313,14 +9639,19 @@ client.queryRules.listRulesets({ ... }) ==== Arguments * *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: specifies a max number of results to get +** *`from` (Optional, number)*: The offset from the first result to fetch. +** *`size` (Optional, number)*: The maximum number of results to retrieve. [discrete] ==== put_rule Create or update a query rule. Create or update a query rule within a query ruleset. +IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. 
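+
+For example, the following sketch pins two documents by ID when a hypothetical `user_query` metadata value exactly matches `pugs`:
+
+[source,ts]
+----
+// Illustrative only: the ruleset, rule, and document IDs are placeholders.
+const response = await client.queryRules.putRule({
+  ruleset_id: "my-ruleset",
+  rule_id: "my-rule",
+  type: "pinned",
+  criteria: [
+    {
+      type: "exact",
+      metadata: "user_query",
+      values: ["pugs"],
+    },
+  ],
+  actions: {
+    ids: ["id1", "id2"],
+  },
+});
+console.log(response);
+----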
+ {ref}/put-query-rule.html[Endpoint documentation] [source,ts] ---- @@ -9331,16 +9662,25 @@ client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) ==== Arguments * *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated -** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated -** *`type` (Enum("pinned" | "exclude"))* -** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])* -** *`actions` ({ ids, docs })* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated. +** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated. +** *`type` (Enum("pinned" | "exclude"))*: The type of rule. +** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*: The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. +** *`actions` ({ ids, docs })*: The actions to take when the rule is matched. +The format of this action depends on the rule type. ** *`priority` (Optional, number)* [discrete] ==== put_ruleset Create or update a query ruleset. +There is a limit of 100 rules per ruleset. +This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + +IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. {ref}/put-query-ruleset.html[Endpoint documentation] [source,ts] @@ -9352,7 +9692,7 @@ client.queryRules.putRuleset({ ruleset_id, rules }) ==== Arguments * *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated. ** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])* [discrete] @@ -9371,7 +9711,8 @@ client.queryRules.test({ ruleset_id, match_criteria }) * *Request (object):* ** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated -** *`match_criteria` (Record)* +** *`match_criteria` (Record)*: The match criteria to apply to rules in the given query ruleset. +Match criteria should match the keys defined in the `criteria.metadata` field of the rule. [discrete] === rollup @@ -9538,6 +9879,39 @@ Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. +The request body supports a subset of features from the regular search API. +The following functionality is not available: + +`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. 
+`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. + +**Searching both historical rollup and non-rollup data** + +The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. +This is done by simply adding the live indices to the URI. For example: + +``` +GET sensor-1,sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +``` + +The rollup search endpoint does two things when the search runs: + +* The original request is sent to the non-rollup index unaltered. +* A rewritten version of the original request is sent to the rollup index. + +When the two responses are received, the endpoint rewrites the rollup response and merges the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + {ref}/rollup-search.html[Endpoint documentation] [source,ts] ---- @@ -9548,9 +9922,15 @@ client.rollup.rollupSearch({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL. +** *`index` (string | string[])*: A list of data streams and indices used to limit the request. +This parameter has the following rules: + +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* Multiple non-rollup indices may be specified. +* Only one rollup index may be specified. If more than one are supplied, an exception occurs. +* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. ** *`aggregations` (Optional, Record)*: Specifies aggregations. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations. ** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. ** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response ** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response @@ -9579,6 +9959,15 @@ Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: + +``` +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +``` +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. +If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. + {ref}/rollup-stop-job.html[Endpoint documentation] [source,ts] ---- @@ -9592,6 +9981,8 @@ client.rollup.stopJob({ id }) ** *`id` (string)*: Identifier for the rollup job. ** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. ** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. @@ -9684,14 +10075,22 @@ client.searchApplication.list({ ... }) [discrete] ==== post_behavioral_analytics_event -Creates a behavioral analytics event for existing collection. +Create a behavioral analytics collection event. -http://todo.com/tbd[Endpoint documentation] +{ref}/post-analytics-collection-event.html[Endpoint documentation] [source,ts] ---- -client.searchApplication.postBehavioralAnalyticsEvent() +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`collection_name` (string)*: The name of the behavioral analytics collection. +** *`event_type` (Enum("page_view" | "search" | "search_click"))*: The analytics event type. 
+** *`payload` (Optional, User-defined value)* +** *`debug` (Optional, boolean)*: Whether the response type has to include more details [discrete] ==== put @@ -9776,7 +10175,7 @@ client.searchApplication.search({ name }) Get cache statistics. Get statistics about the shared cache for partially mounted indices. -{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-cache-stats.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.cacheStats({ ... }) @@ -9786,7 +10185,7 @@ client.searchableSnapshots.cacheStats({ ... }) ==== Arguments * *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +** *`node_id` (Optional, string | string[])*: The names of the nodes in the cluster to target. ** *`master_timeout` (Optional, string | -1 | 0)* [discrete] @@ -9794,7 +10193,7 @@ client.searchableSnapshots.cacheStats({ ... }) Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. -{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-clear-cache.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.clearCache({ ... }) @@ -9804,7 +10203,8 @@ client.searchableSnapshots.clearCache({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to clear from the cache. +It supports wildcards (`*`). ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) ** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) @@ -9828,21 +10228,24 @@ client.searchableSnapshots.mount({ repository, snapshot, index }) ==== Arguments * *Request (object):* -** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount -** *`snapshot` (string)*: The name of the snapshot of the index to mount -** *`index` (string)* -** *`renamed_index` (Optional, string)* -** *`index_settings` (Optional, Record)* -** *`ignore_index_settings` (Optional, string[])* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning -** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` +** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount. +** *`snapshot` (string)*: The name of the snapshot of the index to mount. +** *`index` (string)*: The name of the index contained in the snapshot whose data is to be mounted. +If no `renamed_index` is specified, this name will also be used to create the new index. +** *`renamed_index` (Optional, string)*: The name of the index that will be created. 
+** *`index_settings` (Optional, Record)*: The settings that should be added to the index when it is mounted. +** *`ignore_index_settings` (Optional, string[])*: The names of settings that should be removed from the index when it is mounted. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`wait_for_completion` (Optional, boolean)*: If true, the request blocks until the operation is complete. +** *`storage` (Optional, string)*: The mount option for the searchable snapshot index. [discrete] ==== stats Get searchable snapshot statistics. -{ref}/searchable-snapshots-apis.html[Endpoint documentation] +{ref}/searchable-snapshots-api-stats.html[Endpoint documentation] [source,ts] ---- client.searchableSnapshots.stats({ ... }) @@ -9852,7 +10255,7 @@ client.searchableSnapshots.stats({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names +** *`index` (Optional, string | string[])*: A list of data streams and indices to retrieve statistics for. ** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level [discrete] @@ -9936,14 +10339,47 @@ client.security.bulkPutRole({ roles }) [discrete] ==== bulk_update_api_keys -Updates the attributes of multiple existing API keys. +Bulk update API keys. +Update the attributes for multiple API keys. + +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. {ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] [source,ts] ---- -client.security.bulkUpdateApiKeys() +client.security.bulkUpdateApiKeys({ ids }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`ids` (string | string[])*: The API key identifiers. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +** *`metadata` (Optional, Record)*: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. 
+Any information specified with this parameter fully replaces metadata previously associated with the API key. +** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. +The structure of a role descriptor is the same as the request for the create API keys API. [discrete] ==== change_password @@ -10156,6 +10592,35 @@ client.security.createServiceToken({ namespace, service }) ** *`name` (Optional, string)*: An identifier for the token name ** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +[discrete] +==== delegate_pki +Delegate PKI authentication. +This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. +The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. +A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm. + +This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-​as if the user connected directly to Elasticsearch. + +IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. +This is part of the TLS authentication process and it is delegated to the proxy that calls this API. +The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. + +{ref}/security-api-delegate-pki-authentication.html[Endpoint documentation] +[source,ts] +---- +client.security.delegatePki({ x509_certificate_chain }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`x509_certificate_chain` (string[])*: The X509Certificate chain, which is represented as an ordered string array. +Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. + +The first element is the target certificate that contains the subject distinguished name that is requesting access. +This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. + [discrete] ==== delete_privileges Delete application privileges. @@ -10734,36 +11199,86 @@ client.security.invalidateToken({ ... }) [discrete] ==== oidc_authenticate -Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair +Authenticate OpenID Connect. 
+Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. {ref}/security-api-oidc-authenticate.html[Endpoint documentation] [source,ts] ---- -client.security.oidcAuthenticate() +client.security.oidcAuthenticate({ nonce, redirect_uri, state }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`nonce` (string)*: Associate a client session with an ID token and mitigate replay attacks. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +** *`redirect_uri` (string)*: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. +This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. +** *`state` (string)*: Maintain state between the authentication request and the response. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm. +This property is useful in cases where multiple realms are defined. [discrete] ==== oidc_logout -Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API +Logout of OpenID Connect. +Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. + +If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. {ref}/security-api-oidc-logout.html[Endpoint documentation] [source,ts] ---- -client.security.oidcLogout() +client.security.oidcLogout({ access_token }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`access_token` (string)*: The access token to be invalidated. +** *`refresh_token` (Optional, string)*: The refresh token to be invalidated. [discrete] ==== oidc_prepare_authentication -Creates an OAuth 2.0 authentication request as a URL string +Prepare OpenID connect authentication. +Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. + +The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. 
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. {ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation] [source,ts] ---- -client.security.oidcPrepareAuthentication() +client.security.oidcPrepareAuthentication({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`iss` (Optional, string)*: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. +It cannot be specified when *realm* is specified. +One of *realm* or *iss* is required. +** *`login_hint` (Optional, string)*: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. +This parameter is not valid when *realm* is specified. +** *`nonce` (Optional, string)*: The value used to associate a client session with an ID token and to mitigate replay attacks. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. +It cannot be specified when *iss* is specified. +One of *realm* or *iss* is required. +** *`state` (Optional, string)*: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. [discrete] ==== put_privileges @@ -11244,7 +11759,7 @@ Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/delete-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.deleteNode({ node_id }) @@ -11269,7 +11784,7 @@ NOTE: This feature is designed for indirect use by Elasticsearch Service, Elasti If the operator privileges feature is enabled, you must be an operator to use this API. -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/get-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.getNode({ ... }) @@ -11289,6 +11804,8 @@ Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. + If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. @@ -11300,7 +11817,7 @@ If a node is already being prepared for shutdown, you can use this API to change IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. 
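+
+For example, the following sketch (with a placeholder node ID and reason) registers a node for a restart with a custom allocation delay:
+
+[source,ts]
+----
+// Illustrative only: the node ID, reason, and delay are placeholders.
+const response = await client.shutdown.putNode({
+  node_id: "my-node-1",
+  type: "restart",
+  reason: "Rolling restart for maintenance",
+  allocation_delay: "20m",
+});
+console.log(response);
+----
+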
-https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] +{ref}/put-shutdown.html[Endpoint documentation] [source,ts] ---- client.shutdown.putNode({ node_id, type, reason }) @@ -11310,7 +11827,10 @@ client.shutdown.putNode({ node_id, type, reason }) ==== Arguments * *Request (object):* -** *`node_id` (string)*: The node id of node to be shut down +** *`node_id` (string)*: The node identifier. +This parameter is not validated against the cluster's active nodes. +This enables you to register a node for shut down while it is offline. +No error is thrown if you specify an invalid node ID. ** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. @@ -11328,21 +11848,55 @@ If you specify both a restart allocation delay and an index-level allocation del Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. -** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === simulate [discrete] ==== ingest -Simulates running ingest with example documents. +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. + +This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. + +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. +Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. + +This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. 
+The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. + +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. {ref}/simulate-ingest-api.html[Endpoint documentation] [source,ts] ---- -client.simulate.ingest() +client.simulate.ingest({ docs }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. +** *`index` (Optional, string)*: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. +** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. +** *`index_template_subtitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects. +** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })* +** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +** *`pipeline` (Optional, string)*: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. [discrete] === slm @@ -11363,6 +11917,10 @@ client.slm.deleteLifecycle({ policy_id }) * *Request (object):* ** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== execute_lifecycle @@ -11381,6 +11939,10 @@ client.slm.executeLifecycle({ policy_id }) * *Request (object):* ** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== execute_retention @@ -11391,9 +11953,17 @@ The retention policy is normally applied according to its schedule. 
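+
+For example, the following sketch (with a placeholder timeout value) triggers retention immediately:
+
+[source,ts]
+----
+// Illustrative only: the master_timeout value is a placeholder.
+const response = await client.slm.executeRetention({
+  master_timeout: "30s",
+});
+console.log(response);
+----
+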
{ref}/slm-api-execute-retention.html[Endpoint documentation] [source,ts] ---- -client.slm.executeRetention() +client.slm.executeRetention({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_lifecycle @@ -11411,6 +11981,10 @@ client.slm.getLifecycle({ ... }) * *Request (object):* ** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_stats @@ -11420,9 +11994,15 @@ Get global and policy-level statistics about actions taken by snapshot lifecycle {ref}/slm-api-get-stats.html[Endpoint documentation] [source,ts] ---- -client.slm.getStats() +client.slm.getStats({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_status @@ -11431,9 +12011,19 @@ Get the snapshot lifecycle management status. {ref}/slm-api-get-status.html[Endpoint documentation] [source,ts] ---- -client.slm.getStatus() +client.slm.getStatus({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== put_lifecycle @@ -11458,8 +12048,12 @@ client.slm.putLifecycle({ policy_id }) ** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. ** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. ** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== start @@ -11470,9 +12064,19 @@ Manually starting SLM is necessary only if it has been stopped using the stop SL {ref}/slm-api-start.html[Endpoint documentation] [source,ts] ---- -client.slm.start() +client.slm.start({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== stop @@ -11488,9 +12092,19 @@ Use the get snapshot lifecycle management status API to see if SLM is running. {ref}/slm-api-stop.html[Endpoint documentation] [source,ts] ---- -client.slm.stop() +client.slm.stop({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] === snapshot @@ -11518,7 +12132,7 @@ client.snapshot.cleanupRepository({ repository }) Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/clone-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) @@ -11540,7 +12154,7 @@ client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) Create a snapshot. Take a snapshot of a cluster or of data streams and indices. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/create-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.create({ repository, snapshot }) @@ -11587,7 +12201,7 @@ client.snapshot.createRepository({ repository }) ==== delete Delete snapshots. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/delete-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.delete({ repository, snapshot }) @@ -11607,7 +12221,7 @@ Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. 
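+
+For example, the following sketch (using a placeholder repository name) unregisters a repository:
+
+[source,ts]
+----
+// Illustrative only: "my_repository" is a placeholder repository name.
+const response = await client.snapshot.deleteRepository({
+  repository: "my_repository",
+});
+console.log(response);
+----
+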
-{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/delete-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.deleteRepository({ repository }) @@ -11625,7 +12239,7 @@ client.snapshot.deleteRepository({ repository }) ==== get Get snapshot information. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/get-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.get({ repository, snapshot }) @@ -11657,7 +12271,7 @@ client.snapshot.get({ repository, snapshot }) ==== get_repository Get snapshot repository information. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/get-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.getRepository({ ... }) @@ -11703,7 +12317,7 @@ If no such template exists, you can create one or restore a cluster state that c If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/restore-snapshot-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.restore({ repository, snapshot }) @@ -11742,7 +12356,7 @@ For example, if you have 100 snapshots with 1,000 shards each, an API request th Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/get-snapshot-status-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.status({ ... }) @@ -11762,7 +12376,7 @@ client.snapshot.status({ ... }) Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/verify-snapshot-repo-api.html[Endpoint documentation] [source,ts] ---- client.snapshot.verifyRepository({ repository }) @@ -11800,6 +12414,11 @@ Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: + +* Users with the `cancel_task` cluster privilege. +* The user who first submitted the search. + {ref}/delete-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- @@ -11810,13 +12429,15 @@ client.sql.deleteAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. +** *`id` (string)*: The identifier for the search. [discrete] ==== get_async Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. +If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. + {ref}/get-async-sql-search-api.html[Endpoint documentation] [source,ts] ---- @@ -11827,14 +12448,16 @@ client.sql.getAsync({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. -** *`delimiter` (Optional, string)*: Separator for CSV results. The API only supports this parameter for CSV responses. -** *`format` (Optional, string)*: Format for the response. You must specify a format using this parameter or the -Accept HTTP header. If you specify both, the API uses this parameter. 
-** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for the search and its results. Defaults -to the `keep_alive` period for the original SQL search. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, -meaning the request waits for complete search results. +** *`id` (string)*: The identifier for the search. +** *`delimiter` (Optional, string)*: The separator for CSV results. +The API supports this parameter only for CSV responses. +** *`format` (Optional, string)*: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. [discrete] ==== get_async_status @@ -11851,7 +12474,7 @@ client.sql.getAsyncStatus({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the search. +** *`id` (string)*: The identifier for the search. [discrete] ==== query @@ -11868,31 +12491,46 @@ client.sql.query({ ... }) ==== Arguments * *Request (object):* -** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. -** *`columnar` (Optional, boolean)*: If true, the results in a columnar fashion: one row represents all the values of a certain column from the current page of results. -** *`cursor` (Optional, string)*: Cursor used to retrieve a set of paginated results. +** *`allow_partial_search_results` (Optional, boolean)*: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +** *`catalog` (Optional, string)*: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +** *`columnar` (Optional, boolean)*: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +** *`cursor` (Optional, string)*: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. 
-** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. -** *`query` (Optional, string)*: SQL query to run. +** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. +** *`field_multi_value_leniency` (Optional, boolean)*: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`index_using_frozen` (Optional, boolean)*: If `true`, the search can run on frozen indices. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for an async or saved synchronous search. +** *`keep_on_completion` (Optional, boolean)*: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +** *`page_timeout` (Optional, string | -1 | 0)*: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +** *`params` (Optional, Record)*: The values for parameters in the query. +** *`query` (Optional, string)*: The SQL query to run. ** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails. -** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails. -** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. 
-** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). -** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async. -** *`params` (Optional, Record)*: Values for parameters in the query. -** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search. -** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. -** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false. -** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: Format for the response. +** *`runtime_mappings` (Optional, Record)*: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. +If the search doesn't finish within this period, the search becomes async. + +To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: The format for the response. +You can also specify a format using the `Accept` HTTP header. +If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. [discrete] ==== translate Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. +It accepts the same request body parameters as the SQL search API, excluding `cursor`. {ref}/sql-translate-api.html[Endpoint documentation] [source,ts] @@ -11904,10 +12542,10 @@ client.sql.translate({ query }) ==== Arguments * *Request (object):* -** *`query` (string)*: SQL query to run. +** *`query` (string)*: The SQL query to run. ** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering. -** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. [discrete] === ssl @@ -11944,6 +12582,23 @@ client.ssl.certificates() ==== delete_synonym Delete a synonym set. +You can only delete a synonyms set that is not in use by any index analyzer. + +Synonyms sets can be used in synonym graph token filters and synonym token filters. +These synonym filters can be used as part of search analyzers. + +Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). +Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. + +If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. +To prevent that, synonyms sets that are used in analyzers can't be deleted. +A delete request in this case will return a 400 response code. + +To remove a synonyms set, you must first remove all indices that contain analyzers using it. +You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. +Once finished, you can delete the index. +When the synonyms set is not used in analyzers, you will be able to delete it. + {ref}/delete-synonyms-set.html[Endpoint documentation] [source,ts] ---- @@ -11954,7 +12609,7 @@ client.synonyms.deleteSynonym({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: The id of the synonyms set to be deleted +** *`id` (string)*: The synonyms set identifier to delete. 
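+
+For example, once no index analyzer uses the set, the following sketch (using a placeholder synonyms set ID) deletes it:
+
+[source,ts]
+----
+// Illustrative only: "my-synonyms-set" is a placeholder synonyms set ID.
+const response = await client.synonyms.deleteSynonym({
+  id: "my-synonyms-set",
+});
+console.log(response);
+----
+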
[discrete] ==== delete_synonym_rule @@ -11971,8 +12626,8 @@ client.synonyms.deleteSynonymRule({ set_id, rule_id }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to be updated -** *`rule_id` (string)*: The id of the synonym rule to be deleted +** *`set_id` (string)*: The ID of the synonym set to update. +** *`rule_id` (string)*: The ID of the synonym rule to delete. [discrete] ==== get_synonym @@ -11988,9 +12643,9 @@ client.synonyms.getSynonym({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: "The id of the synonyms set to be retrieved -** *`from` (Optional, number)*: Starting offset for query rules to be retrieved -** *`size` (Optional, number)*: specifies a max number of query rules to retrieve +** *`id` (string)*: The synonyms set identifier to retrieve. +** *`from` (Optional, number)*: The starting offset for query rules to retrieve. +** *`size` (Optional, number)*: The max number of query rules to retrieve. [discrete] ==== get_synonym_rule @@ -12007,15 +12662,15 @@ client.synonyms.getSynonymRule({ set_id, rule_id }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to retrieve the synonym rule from -** *`rule_id` (string)*: The id of the synonym rule to retrieve +** *`set_id` (string)*: The ID of the synonym set to retrieve the synonym rule from. +** *`rule_id` (string)*: The ID of the synonym rule to retrieve. [discrete] ==== get_synonyms_sets Get all synonym sets. Get a summary of all defined synonym sets. -{ref}/list-synonyms-sets.html[Endpoint documentation] +{ref}/get-synonyms-set.html[Endpoint documentation] [source,ts] ---- client.synonyms.getSynonymsSets({ ... }) @@ -12025,8 +12680,8 @@ client.synonyms.getSynonymsSets({ ... }) ==== Arguments * *Request (object):* -** *`from` (Optional, number)*: Starting offset -** *`size` (Optional, number)*: specifies a max number of results to get +** *`from` (Optional, number)*: The starting offset for synonyms sets to retrieve. +** *`size` (Optional, number)*: The maximum number of synonyms sets to retrieve. [discrete] ==== put_synonym @@ -12034,6 +12689,9 @@ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + {ref}/put-synonyms-set.html[Endpoint documentation] [source,ts] ---- @@ -12044,14 +12702,18 @@ client.synonyms.putSynonym({ id, synonyms_set }) ==== Arguments * *Request (object):* -** *`id` (string)*: The id of the synonyms set to be created or updated -** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update +** *`id` (string)*: The ID of the synonyms set to be created or updated. +** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym rules definitions for the synonyms set. [discrete] ==== put_synonym_rule Create or update a synonym rule. Create or update a synonym rule in a synonym set. +If any of the synonym rules included is invalid, the API returns an error. + +When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. 
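As a brief sketch of the call shape described above (the set ID `my-synonyms-set`, the rule ID `rule-1`, and the Solr-format rule text are placeholders for illustration):

[source,js]
----
// Hypothetical IDs and rule text; updating the rule reloads the analyzers that use this synonyms set.
const response = await client.synonyms.putSynonymRule({
  set_id: "my-synonyms-set",
  rule_id: "rule-1",
  synonyms: "sea, ocean",
});
console.log(response);
----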
+ {ref}/put-synonym-rule.html[Endpoint documentation] [source,ts] ---- @@ -12062,9 +12724,9 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ==== Arguments * *Request (object):* -** *`set_id` (string)*: The id of the synonym set to be updated with the synonym rule -** *`rule_id` (string)*: The id of the synonym rule to be updated or created -** *`synonyms` (string)* +** *`set_id` (string)*: The ID of the synonym set. +** *`rule_id` (string)*: The ID of the synonym rule to be updated or created. +** *`synonyms` (string)*: The synonym rule information definition, which must be in Solr format. [discrete] === tasks @@ -12147,6 +12809,22 @@ This information is useful to distinguish tasks from each other but is more cost Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + {ref}/find-field-structure.html[Endpoint documentation] [source,ts] ---- @@ -12174,7 +12852,7 @@ Use this parameter to specify whether to use ECS Grok patterns instead of legacy This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. -** *`explain` (Optional, boolean)*: If true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`explain` (Optional, boolean)*: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. ** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. @@ -12189,7 +12867,7 @@ If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. 
** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. -Otherwise, the default value is false. +Otherwise, the default value is `false`. ** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. ** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. @@ -12245,6 +12923,7 @@ The messages must contain data that is suitable to be ingested into Elasticsearc This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. + The response from the API contains: * Sample messages. @@ -12255,6 +12934,9 @@ Appropriate mappings for an Elasticsearch index, which you could use to ingest t All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + {ref}/find-message-structure.html[Endpoint documentation] [source,ts] ---- @@ -12293,7 +12975,7 @@ If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. ** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. -Otherwise, the default value is false. +Otherwise, the default value is `false`. ** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. ** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. @@ -12372,23 +13054,100 @@ client.textStructure.findStructure({ ... }) * *Request (object):* ** *`text_files` (Optional, TJsonDocument[])* -** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set. -** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. -** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. 
Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
-** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled).
-** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
+** *`charset` (Optional, string)*: The text's character set.
+It must be a character set that is supported by the JVM that Elasticsearch uses.
+For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+If this parameter is not specified, the structure finder chooses an appropriate character set.
+** *`column_names` (Optional, string)*: If you have set `format` to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them.
+** *`explain` (Optional, boolean)*: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
-** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
-** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern. -** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. -** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. -** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2; If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. -** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. -** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. -** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. -** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file +** *`format` (Optional, string)*: The high level structure of the text. +Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. 
+** *`has_header_row` (Optional, boolean)*: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. +** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. +If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. +** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. +The minimum is 2. +If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + +NOTE: The number of lines and the variation of the lines affects the speed of the analysis. +For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. +If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. +** *`quote` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires then it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. ** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. 
+Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. +Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. + [discrete] ==== test_grok_pattern Test a Grok pattern. @@ -12405,9 +13164,11 @@ client.textStructure.testGrokPattern({ grok_pattern, text }) ==== Arguments * *Request (object):* -** *`grok_pattern` (string)*: Grok pattern to run on the text. -** *`text` (string[])*: Lines of text to run the Grok pattern on. -** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). +** *`grok_pattern` (string)*: The Grok pattern to run on the text. +** *`text` (string[])*: The lines of text to run the Grok pattern on. +** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. [discrete] === transform @@ -12800,6 +13561,9 @@ The acknowledgement state of an action is stored in the `status.actions..ack IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. +Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. +This happens when the condition of the watch is not met (the condition evaluates to false). + {ref}/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] ---- @@ -12810,8 +13574,9 @@ client.watcher.ackWatch({ watch_id }) ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID -** *`action_id` (Optional, string | string[])*: A list of the action ids to be acked +** *`watch_id` (string)*: The watch identifier. +** *`action_id` (Optional, string | string[])*: A list of the action identifiers to acknowledge. +If you omit this parameter, all of the actions of the watch are acknowledged. [discrete] ==== activate_watch @@ -12828,7 +13593,7 @@ client.watcher.activateWatch({ watch_id }) ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID +** *`watch_id` (string)*: The watch identifier. 
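For example, a minimal sketch of activating a watch (the watch ID `my_watch` is an assumed placeholder):

[source,js]
----
// Hypothetical watch ID; the watch must already exist for activation to succeed.
const response = await client.watcher.activateWatch({
  watch_id: "my_watch",
});
console.log(response);
----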
[discrete] ==== deactivate_watch @@ -12845,7 +13610,7 @@ client.watcher.deactivateWatch({ watch_id }) ==== Arguments * *Request (object):* -** *`watch_id` (string)*: Watch ID +** *`watch_id` (string)*: The watch identifier. [discrete] ==== delete_watch @@ -12868,7 +13633,7 @@ client.watcher.deleteWatch({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID +** *`id` (string)*: The watch identifier. [discrete] ==== execute_watch @@ -12882,6 +13647,11 @@ You can also force execution by ignoring the watch condition and control whether You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. +When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. +If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. + +When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. + {ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] ---- @@ -12892,26 +13662,37 @@ client.watcher.executeWatch({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: Identifier for the watch. +** *`id` (Optional, string)*: The watch identifier. ** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution. ** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. ** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. -** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent executions. This can also be specified as an HTTP parameter. +** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. +In addition, the status of the watch is updated, possibly throttling subsequent runs. +This can also be specified as an HTTP parameter. ** *`simulated_actions` (Optional, { actions, all, use_all })* -** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution -** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. +** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution. +** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. 
+This watch is not persisted to the index and `record_execution` cannot be set. ** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. [discrete] ==== get_settings -Retrieve settings for the watcher system index +Get Watcher index settings. +Get settings for the Watcher internal index (`.watches`). +Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. {ref}/watcher-api-get-settings.html[Endpoint documentation] [source,ts] ---- -client.watcher.getSettings() +client.watcher.getSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_watch @@ -12927,7 +13708,7 @@ client.watcher.getWatch({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID +** *`id` (string)*: The watch identifier. [discrete] ==== put_watch @@ -12954,15 +13735,20 @@ client.watcher.putWatch({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Watch ID -** *`actions` (Optional, Record)* -** *`condition` (Optional, { always, array_compare, compare, never, script })* -** *`input` (Optional, { chain, http, search, simple })* -** *`metadata` (Optional, Record)* -** *`throttle_period` (Optional, string)* -** *`transform` (Optional, { chain, script, search })* -** *`trigger` (Optional, { schedule })* -** *`active` (Optional, boolean)*: Specify whether the watch is in/active by default +** *`id` (string)*: The identifier for the watch. +** *`actions` (Optional, Record)*: The list of actions that will be run if the condition matches. +** *`condition` (Optional, { always, array_compare, compare, never, script })*: The condition that defines if the actions should be run. +** *`input` (Optional, { chain, http, search, simple })*: The input that defines the input that loads the data for the watch. +** *`metadata` (Optional, Record)*: Metadata JSON that will be copied into the history entries. +** *`throttle_period` (Optional, string | -1 | 0)*: The minimum time between actions being run. +The default is 5 seconds. +This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. +If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. +** *`throttle_period_in_millis` (Optional, Unit)*: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. +** *`transform` (Optional, { chain, script, search })*: The transform that processes the watch payload to prepare it for the watch actions. +** *`trigger` (Optional, { schedule })*: The trigger that defines when the watch should run. +** *`active` (Optional, boolean)*: The initial state of the watch. +The default value is `true`, which means the watch is active by default. 
** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term ** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number ** *`version` (Optional, number)*: Explicit version number for concurrency control @@ -12972,6 +13758,8 @@ client.watcher.putWatch({ id }) Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. +Note that only the `_id` and `metadata.*` fields are queryable or sortable. + {ref}/watcher-api-query-watches.html[Endpoint documentation] [source,ts] ---- @@ -12982,11 +13770,13 @@ client.watcher.queryWatches({ ... }) ==== Arguments * *Request (object):* -** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative. -** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values. +** *`from` (Optional, number)*: The offset from the first result to fetch. +It must be non-negative. +** *`size` (Optional, number)*: The number of hits to return. +It must be non-negative. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results. +** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Retrieve the next page of hits using a set of sort values from the previous page. [discrete] ==== start @@ -13003,6 +13793,8 @@ client.watcher.start() [discrete] ==== stats Get Watcher statistics. 
+This API always returns basic metrics. +You retrieve more metrics by using the metric parameter. {ref}/watcher-api-stats.html[Endpoint documentation] [source,ts] @@ -13025,20 +13817,40 @@ Stop the Watcher service if it is running. {ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] ---- -client.watcher.stop() +client.watcher.stop({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. [discrete] ==== update_settings -Update settings for the watcher system index +Update Watcher index settings. +Update settings for the Watcher internal index (`.watches`). +Only a subset of settings can be modified. +This includes `index.auto_expand_replicas` and `index.number_of_replicas`. {ref}/watcher-api-update-settings.html[Endpoint documentation] [source,ts] ---- -client.watcher.updateSettings() +client.watcher.updateSettings({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index.auto_expand_replicas` (Optional, string)* +** *`index.number_of_replicas` (Optional, number)* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] === xpack @@ -13061,9 +13873,11 @@ client.xpack.info({ ... }) ==== Arguments * *Request (object):* -** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. For example, `build,license,features`. +** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. +For example, `build,license,features`. ** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true -** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. +** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. +In particular, it adds descriptions and a tag line. [discrete] ==== usage @@ -13081,5 +13895,7 @@ client.xpack.usage({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index d3c805129..99d1b9c87 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -87,7 +87,7 @@ export default class Cluster { } /** - * Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
+ * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html | Elasticsearch API documentation} */ async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -181,7 +181,7 @@ export default class Cluster { } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -383,7 +383,7 @@ export default class Cluster { } /** - * Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. + * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 4ac485811..f744a6ea8 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -717,22 +717,34 @@ export default class Connector { } /** - * Updates the stats fields in the connector sync job document. + * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-stats-api.html | Elasticsearch API documentation} */ - async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async syncJobUpdateStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 6d0d2bb62..62dad1aaa 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -46,7 +46,7 @@ export default class DanglingIndices { /** * Delete a dangling index. 
If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html | Elasticsearch API documentation} */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -78,7 +78,7 @@ export default class DanglingIndices { /** * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html | Elasticsearch API documentation} */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -110,7 +110,7 @@ export default class DanglingIndices { /** * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-gateway-dangling-indices.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-indices-list.html | Elasticsearch API documentation} */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 34f2214e0..61e093f40 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -78,7 +78,7 @@ export default class Indices { } /** - * Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. + * Get tokens from text analysis. 
The analyze API performs analysis on a text string and returns the resulting tokens. Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-analyze.html | Elasticsearch API documentation} */ async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -130,7 +130,7 @@ export default class Indices { } /** - * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clearcache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -170,7 +170,7 @@ export default class Indices { } /** - * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. + * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. 
Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clone-index.html | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -247,7 +247,7 @@ export default class Indices { } /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. 
The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-create-index.html | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -363,7 +363,7 @@ export default class Indices { } /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -396,7 +396,7 @@ export default class Indices { /** * Delete an alias. Removes a data stream or index from an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -531,7 +531,7 @@ export default class Indices { } /** - * Deletes a legacy index template. + * Delete a legacy index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -563,7 +563,7 @@ export default class Indices { } /** - * Analyze the index disk usage. 
Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -633,7 +633,7 @@ export default class Indices { } /** - * Check indices. Checks if one or more indices, index aliases, or data streams exist. + * Check indices. Check if one or more indices, index aliases, or data streams exist. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -737,7 +737,7 @@ export default class Indices { } /** - * Check existence of index templates. Returns information about whether a particular index template exists. + * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html | Elasticsearch API documentation} */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -801,7 +801,7 @@ export default class Indices { } /** - * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. 
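As a rough sketch of the disk usage analysis described above (the index name is a placeholder; `run_expensive_tasks` must be set to `true` for the analysis to run):

```
// Analyze per-field disk usage for one index (illustrative index name).
const response = await client.indices.diskUsage({
  index: "my-index-000001",
  run_expensive_tasks: true,
});
console.log(response);
```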
A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -873,7 +873,7 @@ export default class Indices { } /** - * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. + * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you can not cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/`. 
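For illustration, the asynchronous force merge described above could be started like this (the index name is a placeholder; the returned task still needs to be tracked and cleaned up):

```
// Start a force merge without blocking until it completes (illustrative index name).
const response = await client.indices.forcemerge({
  index: "my-index-000001",
  max_num_segments: 1,
  wait_for_completion: false,
});
console.log(response);
```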
When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the `force_merge` threadpool. By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html | Elasticsearch API documentation} */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -913,7 +913,7 @@ export default class Indices { } /** - * Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -946,7 +946,6 @@ export default class Indices { /** * Get aliases. Retrieves information for one or more data stream or index aliases. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1023,6 +1022,36 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle.
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-get-lifecycle-stats.html | Elasticsearch API documentation} + */ + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_lifecycle/stats' + const meta: TransportRequestMetadata = { + name: 'indices.get_data_lifecycle_stats' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data streams. Retrieves information about one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html | Elasticsearch API documentation} @@ -1064,7 +1093,7 @@ export default class Indices { } /** - * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1104,7 +1133,7 @@ export default class Indices { } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1144,7 +1173,7 @@ export default class Indices { } /** - * Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. 
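The `getDataLifecycleStats` method added above takes no required parameters; a minimal usage sketch:

```
// Fetch data stream lifecycle statistics for the cluster.
const response = await client.indices.getDataLifecycleStats();
console.log(response);
```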
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1184,7 +1213,7 @@ export default class Indices { } /** - * Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1231,7 +1260,7 @@ export default class Indices { } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1344,7 +1373,7 @@ export default class Indices { } /** - * Opens a closed index. For data streams, the API opens any closed backing indices. + * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. 
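For example, reopening a closed index as described above might look like this (the index name is a placeholder):

```
// Reopen a closed index and wait for at least the primary shards to be active.
const response = await client.indices.open({
  index: "my-index-000001",
  wait_for_active_shards: 1,
});
console.log(response);
```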
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html | Elasticsearch API documentation} */ async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1504,7 +1533,7 @@ export default class Indices { } /** - * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-template.html | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1548,7 +1577,7 @@ export default class Indices { } /** - * Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. + * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1592,7 +1621,7 @@ export default class Indices { } /** - * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1636,7 +1665,7 @@ export default class Indices { } /** - * Create or update an index template. 
Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1680,7 +1709,7 @@ export default class Indices { } /** - * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. 
You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. + * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1720,7 +1749,7 @@ export default class Indices { } /** - * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. + * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. 
If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1792,7 +1821,7 @@ export default class Indices { } /** - * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. + * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. **Advantages of using this endpoint before a cross-cluster search** You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. 
* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1856,7 +1885,7 @@ export default class Indices { } /** - * Roll over to a new index. Creates a new index for a data stream or index alias. + * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
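A minimal rollover of an index alias as described above might look like this (the alias name is a placeholder):

```
// Roll the alias over to a new write index; the new name is derived automatically.
const response = await client.indices.rollover({
  alias: "my-alias",
});
console.log(response);
```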
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-rollover-index.html | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2033,7 +2062,7 @@ export default class Indices { } /** - * Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-index.html | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2065,7 +2094,7 @@ export default class Indices { } /** - * Simulate an index template. Returns the index configuration that would be applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied by a particular index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html | Elasticsearch API documentation} */ async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2117,7 +2146,7 @@ export default class Indices { } /** - * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. + * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. 
You can make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-split-index.html | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 598225e02..e4d60039d 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -39,8 +39,8 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get cluster info. Returns basic information about the cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index.html | Elasticsearch API documentation} + * Get cluster info. Get basic build, version, and cluster information. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rest-api-root.html | Elasticsearch API documentation} */ export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 8814df08b..05e506b4f 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -77,7 +77,7 @@ export default class Ingest { } /** - * Deletes an IP location database configuration. + * Delete IP geolocation database configurations.
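Putting the split steps described above together with the JavaScript client might look like this (index names and the shard count are placeholders):

```
// Make the source index read-only for writes, then split it into more primary shards.
await client.indices.addBlock({ index: "my_source_index", block: "write" });

const response = await client.indices.split({
  index: "my_source_index",
  target: "my_target_index",
  settings: {
    "index.number_of_shards": 2,
  },
});
console.log(response);
```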
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-ip-location-database-api.html | Elasticsearch API documentation} */ async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -211,7 +211,7 @@ export default class Ingest { } /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ip-location-database-api.html | Elasticsearch API documentation} */ async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -321,7 +321,7 @@ export default class Ingest { } /** - * Create or update GeoIP database configurations. Create or update IP geolocation database configurations. + * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-geoip-database-api.html | Elasticsearch API documentation} */ async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -365,7 +365,7 @@ export default class Ingest { } /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-ip-location-database-api.html | Elasticsearch API documentation} */ async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 2a9f98b9f..6bf3ae4a8 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -45,7 +45,7 @@ export default class Logstash { } /** - * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 379cb575f..4b0988ada 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -45,7 +45,7 @@ export default class Migration { } /** - * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. 
TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html | Elasticsearch API documentation} */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -85,8 +85,8 @@ export default class Migration { } /** - * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-feature-upgrade.html | Elasticsearch API documentation} + * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html | Elasticsearch API documentation} */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -116,7 +116,7 @@ export default class Migration { /** * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-feature-upgrade.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html | Elasticsearch API documentation} */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 08bac2455..950bbcafd 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1609,7 +1609,7 @@ export default class Ml { } /** - * Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used.
This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. + * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ml-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1945,7 +1945,7 @@ export default class Ml { } /** - * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. + * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-dfanalytics.html | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1989,7 +1989,7 @@ export default class Ml { } /** - * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. 
If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-datafeed.html | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2077,7 +2077,7 @@ export default class Ml { } /** - * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. + * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-job.html | Elasticsearch API documentation} */ async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2908,7 +2908,7 @@ export default class Ml { } /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * @see {@link https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html | Elasticsearch API documentation} */ async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2950,7 +2950,7 @@ export default class Ml { } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * @see {@link https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html | Elasticsearch API documentation} */ async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts index c86b4fa35..352915b4c 100644 --- a/src/api/api/query_rules.ts +++ b/src/api/api/query_rules.ts @@ -45,7 +45,7 @@ export default class QueryRules { } /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html | Elasticsearch API documentation} */ async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -78,7 +78,7 @@ export default class QueryRules { } /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html | Elasticsearch API documentation} */ async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -205,7 +205,7 @@ export default class QueryRules { } /** - * Create or update a query rule. Create or update a query rule within a query ruleset. + * Create or update a query rule. 
Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html | Elasticsearch API documentation} */ async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -250,7 +250,7 @@ export default class QueryRules { } /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html | Elasticsearch API documentation} */ async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index df3aafe6c..b3e0d6a11 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -233,7 +233,7 @@ export default class Rollup { } /** - * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. 
* A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -309,7 +309,7 @@ export default class Rollup { } /** - * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index 0316026b0..64cab2ab2 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -211,22 +211,27 @@ export default class SearchApplication { } /** - * Creates a behavioral analytics event for existing collection. - * @see {@link http://todo.com/tbd | Elasticsearch API documentation} + * Create a behavioral analytics collection event. 
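The `wait_for_completion` and `timeout` parameters described above map directly onto client options; a minimal sketch, assuming a rollup job named `sensor` already exists:

[source,js]
----
// Block until the "sensor" rollup job reaches STOPPED, or fail after 10 seconds.
const response = await client.rollup.stopJob({
  id: "sensor",                 // placeholder rollup job ID
  wait_for_completion: true,    // block until the indexer has fully stopped
  timeout: "10s",               // timeout exception if it takes longer than this
});
console.log(response);
----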
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-analytics-collection-event.html | Elasticsearch API documentation} */ - async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async postBehavioralAnalyticsEvent (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['collection_name', 'event_type'] + const acceptedBody: string[] = ['payload'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + let body: any = params.body ?? undefined - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 013ef8607..a7c0af52b 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -46,7 +46,7 @@ export default class SearchableSnapshots { /** * Get cache statistics. Get statistics about the shared cache for partially mounted indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-cache-stats.html | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -86,7 +86,7 @@ export default class SearchableSnapshots { /** * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-clear-cache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -171,7 +171,7 @@ export default class SearchableSnapshots { /** * Get searchable snapshot statistics. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 69c94c326..9ceb151ae 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -198,22 +198,34 @@ export default class Security { } /** - * Updates the attributes of multiple existing API keys. + * Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. 
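A minimal sketch of the bulk update described above; the key IDs are placeholders, and an empty `role_descriptors` object removes key-level restrictions so the keys inherit the owner's current permissions:

[source,js]
----
// Placeholder API key IDs; metadata and expiration are applied to every listed key.
const response = await client.security.bulkUpdateApiKeys({
  ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],  // hypothetical key IDs
  role_descriptors: {},               // empty object removes key-level restrictions
  metadata: { environment: "testing" },
  expiration: "30d",
});
console.log(response);
----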
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-update-api-keys.html | Elasticsearch API documentation} */ - async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async bulkUpdateApiKeys (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -564,6 +576,47 @@ export default class Security { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. 
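A minimal sketch of the delegated PKI exchange, assuming the proxy has already performed TLS authentication; the certificate value is a truncated placeholder:

[source,js]
----
// The chain starts with the target certificate, followed by any intermediates,
// each entry a base64-encoded DER certificate (placeholder value shown).
const response = await client.security.delegatePki({
  x509_certificate_chain: ["MIIDeDCCAmCgAwIBAgIU..."],
});
console.log(response);
----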
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delegate-pki-authentication.html | Elasticsearch API documentation} + */ + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['x509_certificate_chain'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_security/delegate_pki' + const meta: TransportRequestMetadata = { + name: 'security.delegate_pki' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Delete application privileges. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-privilege.html | Elasticsearch API documentation} @@ -1568,22 +1621,34 @@ export default class Security { } /** - * Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
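A sketch of the exchange described above, with placeholder values; `redirect_uri` is the full URL the OpenID Connect Provider redirected back to, and `state` and `nonce` must match the values produced by the prepare step:

[source,js]
----
const response = await client.security.oidcAuthenticate({
  redirect_uri:
    "https://kibana.example.org/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK", // placeholder
  state: "4dbrihtIAt3wBTwo6DxK",   // placeholder, from the prepare step
  nonce: "WaBPH0KqPVdG5HHdSxPRjf", // placeholder, from the prepare step
  realm: "oidc1",                  // optional: the realm to authenticate against
});
console.log(response);
----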
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-authenticate.html | Elasticsearch API documentation} */ - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcAuthenticate (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -1597,22 +1662,34 @@ export default class Security { } /** - * Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API + * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
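A minimal sketch of the logout call described above, with placeholder token values obtained from a previous authenticate call:

[source,js]
----
// Invalidate both tokens returned by the OIDC authenticate step (placeholder values).
const response = await client.security.oidcLogout({
  access_token: "dGhpcyBpcyBub3QgYSByZWFsIHRva2Vu",
  refresh_token: "vLBPvmAB6KvwvJZr27cS",
});
console.log(response);
----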
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-logout.html | Elasticsearch API documentation} */ - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcLogout (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['access_token', 'refresh_token'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -1626,22 +1703,35 @@ export default class Security { } /** - * Creates an OAuth 2.0 authentication request as a URL string + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
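A minimal sketch of the prepare step described above, assuming an OpenID Connect realm named `oidc1` is configured; the response is expected to carry the authorization URL plus the generated state and nonce:

[source,js]
----
// Ask Elasticsearch to build the authorization URL for the "oidc1" realm (placeholder name).
const response = await client.security.oidcPrepareAuthentication({
  realm: "oidc1",
});
// The response contains the redirect URL for the user's browser, plus the
// state and nonce to keep for the later authenticate step.
console.log(response);
----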
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation} */ - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async oidcPrepareAuthentication (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index c54e4d94c..5da40a5e8 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -46,7 +46,7 @@ export default class Shutdown { /** * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -78,7 +78,7 @@ export default class Shutdown { /** * Get the shutdown status. 
Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -117,8 +117,8 @@ export default class Shutdown { } /** - * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} + * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. 
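A hedged sketch of preparing a node for restart; the node ID is a placeholder, and the `type` and `reason` parameters are assumed from the standard shutdown API shape rather than shown in this diff:

[source,js]
----
// Mark a node (placeholder ID) for an orderly restart; Elasticsearch prepares it
// but does NOT stop the process, as noted above.
const response = await client.shutdown.putNode({
  node_id: "USpTGYaBSIKbgSUJR2Z9lg",  // placeholder node ID
  type: "restart",                     // one of restart, remove, or replace
  reason: "Demonstration of a rolling restart",
});
console.log(response);
----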
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index 03fe8570d..25749e525 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -45,22 +45,34 @@ export default class Simulate { } /** - * Simulates running ingest with example documents. + * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. 
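A minimal sketch of the simulate ingest call described above; the index, document contents, and pipeline name are placeholders, and the substituted pipeline applies only within this request:

[source,js]
----
// Run the pipelines that would apply to "my-index" against two sample documents,
// substituting a hypothetical "my-pipeline" definition for this request only.
const response = await client.simulate.ingest({
  index: "my-index",                         // placeholder target index
  docs: [
    { _id: "1", _source: { foo: "bar" } },
    { _id: "2", _source: { foo: "baz" } },
  ],
  pipeline_substitutions: {
    "my-pipeline": {                         // placeholder pipeline name
      processors: [{ set: { field: "field3", value: "value3" } }],
    },
  },
});
console.log(response);
----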
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/simulate-ingest-api.html | Elasticsearch API documentation} */ - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async ingest (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise + async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['index'] + const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index a7b8196c0..ec9517a9e 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -78,7 +78,7 @@ export default class Snapshot { /** * Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -124,7 +124,7 @@ export default class Snapshot { /** * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -206,7 +206,7 @@ export default class Snapshot { /** * Delete snapshots. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -239,7 +239,7 @@ export default class Snapshot { /** * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -271,7 +271,7 @@ export default class Snapshot { /** * Get snapshot information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -304,7 +304,7 @@ export default class Snapshot { /** * Get snapshot repository information. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -376,7 +376,7 @@ export default class Snapshot { /** * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. 
* Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -408,7 +408,7 @@ /** * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled.
To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -453,7 +453,7 @@ export default class Snapshot { /** * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -497,7 +497,7 @@ export default class Snapshot { /** * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. 
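The index-template check quoted in the restore description above can also be expressed with this client; a rough sketch using the indices API and a `filter_path` query parameter:

[source,js]
----
// List index templates, keeping only the fields needed to spot a data stream template.
const response = await client.indices.getIndexTemplate({
  name: "*",
  filter_path:
    "index_templates.name," +
    "index_templates.index_template.index_patterns," +
    "index_templates.index_template.data_stream",
});
console.log(response);
----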
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 4bdc59435..233fc4a34 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -86,7 +86,7 @@ export default class Sql { } /** - * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. + * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-async-sql-search-api.html | Elasticsearch API documentation} */ async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -118,7 +118,7 @@ export default class Sql { } /** - * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. + * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-api.html | Elasticsearch API documentation} */ async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -190,7 +190,7 @@ export default class Sql { async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] - const acceptedBody: string[] = ['catalog', 'columnar', 'cursor', 'fetch_size', 'filter', 'query', 'request_timeout', 'page_timeout', 'time_zone', 'field_multi_value_leniency', 'runtime_mappings', 'wait_for_completion_timeout', 'params', 'keep_alive', 'keep_on_completion', 'index_using_frozen'] + const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -224,7 +224,7 @@ export default class Sql { } /** - * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. 
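A minimal sketch of the translate call; the index and field names in the SQL statement are placeholders:

[source,js]
----
// Translate a simple SQL query into the equivalent Query DSL search request.
const response = await client.sql.translate({
  query: "SELECT * FROM library WHERE release_date < '2000-01-01'", // placeholder index and field
  fetch_size: 10,
});
console.log(response);
----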
+ * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-translate-api.html | Elasticsearch API documentation} */ async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index ffac1a600..6329c547f 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -45,7 +45,7 @@ export default class Synonyms { } /** - * Delete a synonym set. + * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-synonyms-set.html | Elasticsearch API documentation} */ async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -176,7 +176,7 @@ export default class Synonyms { /** * Get all synonym sets. Get a summary of all defined synonym sets. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-synonyms-sets.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonyms-set.html | Elasticsearch API documentation} */ async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -205,7 +205,7 @@ export default class Synonyms { } /** - * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. 
This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonyms-set.html | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -249,7 +249,7 @@ export default class Synonyms { } /** - * Create or update a synonym rule. Create or update a synonym rule in a synonym set. + * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonym-rule.html | Elasticsearch API documentation} */ async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 93cd23ceb..eac383f60 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -45,7 +45,7 @@ export default class TextStructure { } /** - * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. + * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-field-structure.html | Elasticsearch API documentation} */ async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,7 +74,7 @@ export default class TextStructure { } /** - * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. 
Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-message-structure.html | Elasticsearch API documentation} */ async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 5e5056706..46b07a5d9 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -45,7 +45,7 @@ export default class Watcher { } /** - * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. 
Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-ack-watch.html | Elasticsearch API documentation} */ async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -181,7 +181,7 @@ export default class Watcher { } /** - * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. + * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-execute-watch.html | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -233,13 +233,13 @@ export default class Watcher { } /** - * Retrieve settings for the watcher system index + * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
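As a rough illustration of the run watch API described above: forcing a run of a hypothetical watch while simulating its actions. The `action_modes`, `ignore_condition`, and `record_execution` parameters are assumed to be accepted at the top level of the request.

[source, js]
----
// Hypothetical watch id; all actions are simulated rather than executed.
const response = await client.watcher.executeWatch({
  id: "my_watch",
  action_modes: { _all: "simulate" },
  ignore_condition: true,
  record_execution: false,
});
console.log(response);
----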
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined @@ -249,6 +249,7 @@ export default class Watcher { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -302,7 +303,7 @@ export default class Watcher { async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'transform', 'trigger'] + const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -338,7 +339,7 @@ export default class Watcher { } /** - * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. + * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-query-watches.html | Elasticsearch API documentation} */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -410,7 +411,7 @@ export default class Watcher { } /** - * Get Watcher statistics. + * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -480,22 +481,35 @@ export default class Watcher { } /** - * Update settings for the watcher system index + * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). 
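As a rough illustration of the query watches API noted above (only `_id` and `metadata.*` fields are queryable or sortable), a sketch that filters on a hypothetical metadata field.

[source, js]
----
// "metadata.team" is a hypothetical field stored in the watches' metadata.
const response = await client.watcher.queryWatches({
  query: { term: { "metadata.team": "ops" } },
  size: 10,
});
console.log(response);
----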
Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/types.ts b/src/api/types.ts index c87ca4668..1a719857f 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -9945,6 +9945,19 @@ export interface ConnectorSyncJobPostResponse { id: Id } +export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { + connector_sync_job_id: Id + deleted_document_count: long + indexed_document_count: long + indexed_document_volume: long + last_seen?: Duration + metadata?: Metadata + total_document_count?: integer +} + +export interface ConnectorSyncJobUpdateStatsResponse { +} + export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { connector_id: Id } @@ -11698,6 +11711,22 @@ export interface IndicesGetDataLifecycleResponse { data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] } +export interface IndicesGetDataLifecycleStatsDataStreamStats { + backing_indices_in_error: integer + backing_indices_in_total: integer + name: DataStreamName +} + +export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { +} + +export interface IndicesGetDataLifecycleStatsResponse { + data_stream_count: integer + data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + last_run_duration_in_millis?: DurationValue + time_between_starts_in_millis?: DurationValue +} + export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards @@ -12759,6 +12788,24 @@ export interface IngestDissectProcessor extends IngestProcessorBase { pattern: string } +export interface IngestDocument { + _id?: Id + _index?: IndexName + _source: any +} + +export interface IngestDocumentSimulationKeys { + _id: Id + _index: IndexName + _ingest: IngestIngest + _routing?: string + _source: Record + _version?: SpecUtilsStringified + _version_type?: VersionType +} +export type IngestDocumentSimulation = IngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType } + export interface IngestDotExpanderProcessor extends IngestProcessorBase { field: Field override?: boolean @@ -12872,6 +12919,12 @@ export interface IngestInferenceProcessor extends IngestProcessorBase { inference_config?: IngestInferenceConfig } +export interface IngestIngest { + _redact?: IngestRedact + timestamp: DateTime + pipeline?: Name +} + export interface IngestIpLocationProcessor extends IngestProcessorBase { database_file?: string field: Field @@ -12958,6 +13011,16 @@ export interface IngestPipelineProcessor extends IngestProcessorBase { ignore_missing_pipeline?: boolean } +export interface IngestPipelineSimulation { + doc?: IngestDocumentSimulation + tag?: string + processor_type?: string + status?: WatcherActionStatusOptions + description?: string + ignored_error?: ErrorCause + error?: ErrorCause +} + export interface IngestProcessorBase { description?: string if?: string @@ -13014,6 +13077,10 @@ export interface IngestProcessorContainer { user_agent?: IngestUserAgentProcessor } +export interface IngestRedact { + _is_redacted: boolean +} + export interface IngestRedactProcessor extends IngestProcessorBase { field: Field patterns: GrokPattern[] @@ -13072,6 +13139,12 @@ export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { export type IngestShapeType = 'geo_shape' | 'shape' +export interface 
IngestSimulateDocumentResult { + doc?: IngestDocumentSimulation + error?: ErrorCause + processor_results?: IngestPipelineSimulation[] +} + export interface IngestSortProcessor extends IngestProcessorBase { field: Field order?: SortOrder @@ -13261,59 +13334,15 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase -export interface IngestSimulateDocument { - _id?: Id - _index?: IndexName - _source: any -} - -export interface IngestSimulateDocumentSimulationKeys { - _id: Id - _index: IndexName - _ingest: IngestSimulateIngest - _routing?: string - _source: Record - _version?: SpecUtilsStringified - _version_type?: VersionType -} -export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys -& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } - -export interface IngestSimulateIngest { - _redact?: IngestSimulateRedact - timestamp: DateTime - pipeline?: Name -} - -export interface IngestSimulatePipelineSimulation { - doc?: IngestSimulateDocumentSimulation - tag?: string - processor_type?: string - status?: WatcherActionStatusOptions - description?: string - ignored_error?: ErrorCause - error?: ErrorCause -} - -export interface IngestSimulateRedact { - _is_redacted: boolean -} - export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean - docs: IngestSimulateDocument[] + docs: IngestDocument[] pipeline?: IngestPipeline } export interface IngestSimulateResponse { - docs: IngestSimulateSimulateDocumentResult[] -} - -export interface IngestSimulateSimulateDocumentResult { - doc?: IngestSimulateDocumentSimulation - error?: ErrorCause - processor_results?: IngestSimulatePipelineSimulation[] + docs: IngestSimulateDocumentResult[] } export interface LicenseLicense { @@ -17313,6 +17342,8 @@ export interface SearchApplicationEventDataStream { name: IndexName } +export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click' + export interface SearchApplicationSearchApplication { name: Name indices: IndexName[] @@ -17367,6 +17398,18 @@ export interface SearchApplicationListSearchApplicationListItem { analytics_collection_name?: Name } +export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { + collection_name: Name + event_type: SearchApplicationEventType + debug?: boolean + payload?: any +} + +export interface SearchApplicationPostBehavioralAnalyticsEventResponse { + accepted: boolean + event?: any +} + export interface SearchApplicationPutRequest extends RequestBase { name: Name create?: boolean @@ -17767,6 +17810,19 @@ export interface SecurityBulkPutRoleResponse { errors?: SecurityBulkError } +export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { + expiration?: Duration + ids: string | string[] + metadata?: Metadata + role_descriptors?: Record +} + +export interface SecurityBulkUpdateApiKeysResponse { + errors?: SecurityBulkError + noops: string[] + updated: string[] +} + export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh @@ -17878,6 +17934,37 @@ export interface SecurityCreateServiceTokenToken { value: string } +export interface SecurityDelegatePkiAuthentication { + username: string + roles: string[] + full_name: string | null + email: string | null + token?: Record + metadata: Metadata + enabled: boolean + authentication_realm: SecurityDelegatePkiAuthenticationRealm + lookup_realm: 
SecurityDelegatePkiAuthenticationRealm + authentication_type: string + api_key?: Record +} + +export interface SecurityDelegatePkiAuthenticationRealm { + name: string + type: string + domain?: string +} + +export interface SecurityDelegatePkiRequest extends RequestBase { + x509_certificate_chain: string[] +} + +export interface SecurityDelegatePkiResponse { + access_token: string + expires_in: long + type: string + authentication?: SecurityDelegatePkiAuthentication +} + export interface SecurityDeletePrivilegesFoundStatus { found: boolean } @@ -18255,6 +18342,44 @@ export interface SecurityInvalidateTokenResponse { previously_invalidated_tokens: long } +export interface SecurityOidcAuthenticateRequest extends RequestBase { + nonce: string + realm?: string + redirect_uri: string + state: string +} + +export interface SecurityOidcAuthenticateResponse { + access_token: string + expires_in: integer + refresh_token: string + type: string +} + +export interface SecurityOidcLogoutRequest extends RequestBase { + access_token: string + refresh_token?: string +} + +export interface SecurityOidcLogoutResponse { + redirect: string +} + +export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { + iss?: string + login_hint?: string + nonce?: string + realm?: string + state?: string +} + +export interface SecurityOidcPrepareAuthenticationResponse { + nonce: string + realm: string + redirect: string + state: string +} + export interface SecurityPutPrivilegesActions { actions: string[] application?: string @@ -18624,6 +18749,20 @@ export interface ShutdownPutNodeRequest extends RequestBase { export type ShutdownPutNodeResponse = AcknowledgedResponseBase +export interface SimulateIngestRequest extends RequestBase { + index?: IndexName + pipeline?: PipelineName + docs: IngestDocument[] + component_template_substitutions?: Record + index_template_subtitutions?: Record + mapping_addition?: MappingTypeMapping + pipeline_substitutions?: Record +} + +export interface SimulateIngestResponse { + docs: IngestSimulateDocumentResult[] +} + export interface SlmConfiguration { ignore_unavailable?: boolean indices?: Indices @@ -18691,12 +18830,16 @@ export interface SlmStatistics { export interface SlmDeleteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export interface SlmExecuteLifecycleResponse { @@ -18704,17 +18847,23 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { policy_id?: Names + master_timeout?: Duration + timeout?: Duration } export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatsResponse { @@ -18731,6 +18880,8 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatusResponse { @@ -18751,11 +18902,15 @@ export interface SlmPutLifecycleRequest extends RequestBase { export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface 
SlmStartRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStopResponse = AcknowledgedResponseBase @@ -19175,40 +19330,41 @@ export interface SqlGetAsyncStatusRequest extends RequestBase { } export interface SqlGetAsyncStatusResponse { + expiration_time_in_millis: EpochTime id: string is_running: boolean is_partial: boolean start_time_in_millis: EpochTime - expiration_time_in_millis: EpochTime completion_status?: uint } export interface SqlQueryRequest extends RequestBase { format?: SqlQuerySqlFormat + allow_partial_search_results?: boolean catalog?: string columnar?: boolean cursor?: string fetch_size?: integer + field_multi_value_leniency?: boolean filter?: QueryDslQueryContainer + index_using_frozen?: boolean + keep_alive?: Duration + keep_on_completion?: boolean + page_timeout?: Duration + params?: Record query?: string request_timeout?: Duration - page_timeout?: Duration - time_zone?: TimeZone - field_multi_value_leniency?: boolean runtime_mappings?: MappingRuntimeFields + time_zone?: TimeZone wait_for_completion_timeout?: Duration - params?: Record - keep_alive?: Duration - keep_on_completion?: boolean - index_using_frozen?: boolean } export interface SqlQueryResponse { + columns?: SqlColumn[] + cursor?: string id?: Id is_running?: boolean is_partial?: boolean - columns?: SqlColumn[] - cursor?: string rows: SqlRow[] } @@ -20419,6 +20575,14 @@ export interface WatcherExecuteWatchWatchRecord { status?: WatcherWatchStatus } +export interface WatcherGetSettingsRequest extends RequestBase { + master_timeout?: Duration +} + +export interface WatcherGetSettingsResponse { + index: IndicesIndexSettings +} + export interface WatcherGetWatchRequest extends RequestBase { id: Name } @@ -20443,7 +20607,8 @@ export interface WatcherPutWatchRequest extends RequestBase { condition?: WatcherConditionContainer input?: WatcherInputContainer metadata?: Metadata - throttle_period?: string + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer trigger?: WatcherTriggerContainer } @@ -20512,10 +20677,22 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { + master_timeout?: Duration } export type WatcherStopResponse = AcknowledgedResponseBase +export interface WatcherUpdateSettingsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration + 'index.auto_expand_replicas'?: string + 'index.number_of_replicas'?: integer +} + +export interface WatcherUpdateSettingsResponse { + acknowledged: boolean +} + export interface XpackInfoBuildInformation { date: DateTime hash: string diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 08df31aa9..e93f6a56c 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -10067,6 +10067,22 @@ export interface ConnectorSyncJobPostResponse { id: Id } +export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { + connector_sync_job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + deleted_document_count: long + indexed_document_count: long + indexed_document_volume: long + last_seen?: Duration + metadata?: Metadata + total_document_count?: integer + } +} + +export interface ConnectorSyncJobUpdateStatsResponse { +} + export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { connector_id: Id } @@ -11894,6 +11910,22 @@ export interface IndicesGetDataLifecycleResponse { data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] } +export interface IndicesGetDataLifecycleStatsDataStreamStats { + backing_indices_in_error: integer + backing_indices_in_total: integer + name: DataStreamName +} + +export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { +} + +export interface IndicesGetDataLifecycleStatsResponse { + data_stream_count: integer + data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + last_run_duration_in_millis?: DurationValue + time_between_starts_in_millis?: DurationValue +} + export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards @@ -12996,6 +13028,24 @@ export interface IngestDissectProcessor extends IngestProcessorBase { pattern: string } +export interface IngestDocument { + _id?: Id + _index?: IndexName + _source: any +} + +export interface IngestDocumentSimulationKeys { + _id: Id + _index: IndexName + _ingest: IngestIngest + _routing?: string + _source: Record + _version?: SpecUtilsStringified + _version_type?: VersionType +} +export type IngestDocumentSimulation = IngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType } + export interface IngestDotExpanderProcessor extends IngestProcessorBase { field: Field override?: boolean @@ -13109,6 +13159,12 @@ export interface IngestInferenceProcessor extends IngestProcessorBase { inference_config?: IngestInferenceConfig } +export interface IngestIngest { + _redact?: IngestRedact + timestamp: DateTime + pipeline?: Name +} + export interface IngestIpLocationProcessor extends IngestProcessorBase { database_file?: string field: Field @@ -13195,6 +13251,16 @@ export interface IngestPipelineProcessor extends IngestProcessorBase { ignore_missing_pipeline?: boolean } +export interface IngestPipelineSimulation { + doc?: IngestDocumentSimulation + tag?: string + processor_type?: string + status?: WatcherActionStatusOptions + description?: string + ignored_error?: ErrorCause + error?: ErrorCause +} + export interface IngestProcessorBase { description?: string if?: string @@ -13251,6 +13317,10 @@ export interface IngestProcessorContainer { user_agent?: IngestUserAgentProcessor } +export interface IngestRedact { + _is_redacted: boolean +} + export interface IngestRedactProcessor extends IngestProcessorBase { field: Field patterns: GrokPattern[] @@ -13309,6 +13379,12 @@ export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { export type IngestShapeType = 'geo_shape' | 'shape' +export interface IngestSimulateDocumentResult { + doc?: IngestDocumentSimulation + error?: ErrorCause + processor_results?: IngestPipelineSimulation[] +} + export interface IngestSortProcessor extends IngestProcessorBase { field: Field order?: SortOrder @@ -13505,62 +13581,18 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase -export interface IngestSimulateDocument { - _id?: Id - _index?: IndexName - _source: any -} - -export interface 
IngestSimulateDocumentSimulationKeys { - _id: Id - _index: IndexName - _ingest: IngestSimulateIngest - _routing?: string - _source: Record - _version?: SpecUtilsStringified - _version_type?: VersionType -} -export type IngestSimulateDocumentSimulation = IngestSimulateDocumentSimulationKeys -& { [property: string]: string | Id | IndexName | IngestSimulateIngest | Record | SpecUtilsStringified | VersionType } - -export interface IngestSimulateIngest { - _redact?: IngestSimulateRedact - timestamp: DateTime - pipeline?: Name -} - -export interface IngestSimulatePipelineSimulation { - doc?: IngestSimulateDocumentSimulation - tag?: string - processor_type?: string - status?: WatcherActionStatusOptions - description?: string - ignored_error?: ErrorCause - error?: ErrorCause -} - -export interface IngestSimulateRedact { - _is_redacted: boolean -} - export interface IngestSimulateRequest extends RequestBase { id?: Id verbose?: boolean /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { - docs: IngestSimulateDocument[] + docs: IngestDocument[] pipeline?: IngestPipeline } } export interface IngestSimulateResponse { - docs: IngestSimulateSimulateDocumentResult[] -} - -export interface IngestSimulateSimulateDocumentResult { - doc?: IngestSimulateDocumentSimulation - error?: ErrorCause - processor_results?: IngestSimulatePipelineSimulation[] + docs: IngestSimulateDocumentResult[] } export interface LicenseLicense { @@ -17697,6 +17729,8 @@ export interface SearchApplicationEventDataStream { name: IndexName } +export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click' + export interface SearchApplicationSearchApplication { name: Name indices: IndexName[] @@ -17751,6 +17785,19 @@ export interface SearchApplicationListSearchApplicationListItem { analytics_collection_name?: Name } +export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { + collection_name: Name + event_type: SearchApplicationEventType + debug?: boolean + /** @deprecated The use of the 'body' key has been deprecated, use 'payload' instead. */ + body?: any +} + +export interface SearchApplicationPostBehavioralAnalyticsEventResponse { + accepted: boolean + event?: any +} + export interface SearchApplicationPutRequest extends RequestBase { name: Name create?: boolean @@ -18170,6 +18217,22 @@ export interface SecurityBulkPutRoleResponse { errors?: SecurityBulkError } +export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + expiration?: Duration + ids: string | string[] + metadata?: Metadata + role_descriptors?: Record + } +} + +export interface SecurityBulkUpdateApiKeysResponse { + errors?: SecurityBulkError + noops: string[] + updated: string[] +} + export interface SecurityChangePasswordRequest extends RequestBase { username?: Username refresh?: Refresh @@ -18290,6 +18353,40 @@ export interface SecurityCreateServiceTokenToken { value: string } +export interface SecurityDelegatePkiAuthentication { + username: string + roles: string[] + full_name: string | null + email: string | null + token?: Record + metadata: Metadata + enabled: boolean + authentication_realm: SecurityDelegatePkiAuthenticationRealm + lookup_realm: SecurityDelegatePkiAuthenticationRealm + authentication_type: string + api_key?: Record +} + +export interface SecurityDelegatePkiAuthenticationRealm { + name: string + type: string + domain?: string +} + +export interface SecurityDelegatePkiRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + x509_certificate_chain: string[] + } +} + +export interface SecurityDelegatePkiResponse { + access_token: string + expires_in: long + type: string + authentication?: SecurityDelegatePkiAuthentication +} + export interface SecurityDeletePrivilegesFoundStatus { found: boolean } @@ -18685,6 +18782,53 @@ export interface SecurityInvalidateTokenResponse { previously_invalidated_tokens: long } +export interface SecurityOidcAuthenticateRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + nonce: string + realm?: string + redirect_uri: string + state: string + } +} + +export interface SecurityOidcAuthenticateResponse { + access_token: string + expires_in: integer + refresh_token: string + type: string +} + +export interface SecurityOidcLogoutRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + access_token: string + refresh_token?: string + } +} + +export interface SecurityOidcLogoutResponse { + redirect: string +} + +export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + iss?: string + login_hint?: string + nonce?: string + realm?: string + state?: string + } +} + +export interface SecurityOidcPrepareAuthenticationResponse { + nonce: string + realm: string + redirect: string + state: string +} + export interface SecurityPutPrivilegesActions { actions: string[] application?: string @@ -19104,6 +19248,23 @@ export interface ShutdownPutNodeRequest extends RequestBase { export type ShutdownPutNodeResponse = AcknowledgedResponseBase +export interface SimulateIngestRequest extends RequestBase { + index?: IndexName + pipeline?: PipelineName + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + docs: IngestDocument[] + component_template_substitutions?: Record + index_template_subtitutions?: Record + mapping_addition?: MappingTypeMapping + pipeline_substitutions?: Record + } +} + +export interface SimulateIngestResponse { + docs: IngestSimulateDocumentResult[] +} + export interface SlmConfiguration { ignore_unavailable?: boolean indices?: Indices @@ -19171,12 +19332,16 @@ export interface SlmStatistics { export interface SlmDeleteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { policy_id: Name + master_timeout?: Duration + timeout?: Duration } export interface SlmExecuteLifecycleResponse { @@ -19184,17 +19349,23 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { policy_id?: Names + master_timeout?: Duration + timeout?: Duration } export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatsResponse { @@ -19211,6 +19382,8 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export interface SlmGetStatusResponse { @@ -19234,11 +19407,15 @@ export interface SlmPutLifecycleRequest extends RequestBase { export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration } export type SlmStopResponse = AcknowledgedResponseBase @@ -19671,11 +19848,11 @@ export interface SqlGetAsyncStatusRequest extends RequestBase { } export interface SqlGetAsyncStatusResponse { + expiration_time_in_millis: EpochTime id: string is_running: boolean is_partial: boolean start_time_in_millis: EpochTime - expiration_time_in_millis: EpochTime completion_status?: uint } @@ -19683,31 +19860,32 @@ export interface SqlQueryRequest extends RequestBase { format?: SqlQuerySqlFormat /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { + allow_partial_search_results?: boolean catalog?: string columnar?: boolean cursor?: string fetch_size?: integer + field_multi_value_leniency?: boolean filter?: QueryDslQueryContainer + index_using_frozen?: boolean + keep_alive?: Duration + keep_on_completion?: boolean + page_timeout?: Duration + params?: Record query?: string request_timeout?: Duration - page_timeout?: Duration - time_zone?: TimeZone - field_multi_value_leniency?: boolean runtime_mappings?: MappingRuntimeFields + time_zone?: TimeZone wait_for_completion_timeout?: Duration - params?: Record - keep_alive?: Duration - keep_on_completion?: boolean - index_using_frozen?: boolean } } export interface SqlQueryResponse { + columns?: SqlColumn[] + cursor?: string id?: Id is_running?: boolean is_partial?: boolean - columns?: SqlColumn[] - cursor?: string rows: SqlRow[] } @@ -20946,6 +21124,14 @@ export interface WatcherExecuteWatchWatchRecord { status?: WatcherWatchStatus } +export interface WatcherGetSettingsRequest extends RequestBase { + master_timeout?: Duration +} + +export interface WatcherGetSettingsResponse { + index: IndicesIndexSettings +} + export interface WatcherGetWatchRequest extends RequestBase { id: Name } @@ -20972,7 +21158,8 @@ export interface WatcherPutWatchRequest extends RequestBase { condition?: WatcherConditionContainer input?: WatcherInputContainer metadata?: Metadata - throttle_period?: string + throttle_period?: Duration + throttle_period_in_millis?: DurationValue transform?: TransformContainer trigger?: WatcherTriggerContainer } @@ -21045,10 +21232,25 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { + master_timeout?: Duration } export type WatcherStopResponse = AcknowledgedResponseBase +export interface WatcherUpdateSettingsRequest extends RequestBase { + master_timeout?: Duration + timeout?: Duration + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + 'index.auto_expand_replicas'?: string + 'index.number_of_replicas'?: integer + } +} + +export interface WatcherUpdateSettingsResponse { + acknowledged: boolean +} + export interface XpackInfoBuildInformation { date: DateTime hash: string
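To close, a minimal sketch of the newly typed Watcher settings update shown above, adjusting the replica count of the internal `.watches` index; the value chosen here is only an example.

[source, js]
----
// Only a subset of index settings can be changed, e.g. index.number_of_replicas.
const response = await client.watcher.updateSettings({
  "index.number_of_replicas": 1,
});
console.log(response);
----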