diff --git a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc similarity index 53% rename from docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc rename to docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc index 3b4f9251b..84abd3971 100644 --- a/docs/doc_examples/e20037f66bf54bcac7d10f536f031f34.asciidoc +++ b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc @@ -3,11 +3,8 @@ [source, js] ---- -const response = await client.indices.putSettings({ - index: "my-index-000001", - settings: { - "index.blocks.read_only_allow_delete": null, - }, +const response = await client.indices.getMapping({ + index: "kibana_sample_data_ecommerce", }); console.log(response); ---- diff --git a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc similarity index 85% rename from docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc rename to docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc index b0acfaa1d..9d98d539a 100644 --- a/docs/doc_examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc +++ b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc @@ -11,6 +11,8 @@ const response = await client.indices.putSettings({ "index.indexing.slowlog.threshold.index.debug": "2s", "index.indexing.slowlog.threshold.index.trace": "500ms", "index.indexing.slowlog.source": "1000", + "index.indexing.slowlog.reformat": true, + "index.indexing.slowlog.include.user": true, }, }); console.log(response); diff --git a/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc new file mode 100644 index 000000000..269067032 --- /dev/null +++ b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 5b0c7d4e7..506e4ff5b 100644 --- a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -3,8 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ - format: "json", +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", + querystring: { + format: "json", + }, body: { query: "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index 3801b625f..045036fa2 100644 --- a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- 
-const response = await client.searchApplication.renderQuery({ - name: "my-app", +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my-app/_render_query", body: { params: { query_string: "my first query", diff --git a/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc b/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc deleted file mode 100644 index aac173f77..000000000 --- a/docs/doc_examples/0e83f140237d75469a428ff403564bb5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cluster.putSettings({ - persistent: { - "cluster.routing.allocation.disk.watermark.low": "100gb", - "cluster.routing.allocation.disk.watermark.high": "50gb", - "cluster.routing.allocation.disk.watermark.flood_stage": "10gb", - "cluster.info.update.interval": "1m", - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc b/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc deleted file mode 100644 index ce7709b43..000000000 --- a/docs/doc_examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: "test-index", - query: { - nested: { - path: "inference_field.inference.chunks", - query: { - sparse_vector: { - field: "inference_field.inference.chunks.embeddings", - inference_id: "my-inference-id", - query: "mountain lake", - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc index 64aa8e2d1..8771d32f7 100644 --- a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc +++ b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc @@ -11,7 +11,7 @@ const response = await client.searchApplication.put({ script: { lang: "mustache", source: - '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', params: { query: "", _es_filters: {}, diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index c5453ffaf..724b30762 100644 --- a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: 
"POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc new file mode 100644 index 000000000..deabe9511 --- /dev/null +++ b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-rank-vectors-bit", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [ + 0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21, + 0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0, + 0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19, + 0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84, + ], + ], + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index 4853ab9a3..b3545c105 100644 --- a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcLogout({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/logout", body: { token: "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", diff --git a/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc new file mode 100644 index 000000000..07c3eb29d --- /dev/null +++ b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-rank-vectors-float", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index 44648b27c..e05299751 100644 --- a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -3,10 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQueryGet({ - id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", - wait_for_completion_timeout: "30s", - body: null, +const response = await client.transport.request({ + method: "GET", + path: 
"/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + querystring: { + wait_for_completion_timeout: "30s", + }, }); console.log(response); ---- diff --git a/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc similarity index 93% rename from docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc rename to docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc index 693b70a4f..f1ba5e168 100644 --- a/docs/doc_examples/23af230e824f48b9cd56a4cf973d788c.asciidoc +++ b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc @@ -14,6 +14,7 @@ const response = await client.indices.putSettings({ "index.search.slowlog.threshold.fetch.info": "800ms", "index.search.slowlog.threshold.fetch.debug": "500ms", "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.include.user": true, }, }); console.log(response); diff --git a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc new file mode 100644 index 000000000..8651f44c6 --- /dev/null +++ b/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + size: 10, + retriever: { + rescorer: { + rescore: { + query: { + window_size: 50, + rescore_query: { + script_score: { + script: { + source: + "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + params: { + queryVector: [-0.5, 90, -10, 14.8, -156], + }, + }, + }, + }, + }, + }, + retriever: { + rrf: { + rank_window_size: 100, + retrievers: [ + { + standard: { + query: { + sparse_vector: { + field: "plot_embedding", + inference_id: "my-elser-model", + query: "films that explore psychological depths", + }, + }, + }, + }, + { + standard: { + query: { + multi_match: { + query: "crime", + fields: ["plot", "title"], + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc new file mode 100644 index 000000000..32f004a99 --- /dev/null +++ b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove_index: { + index: "my-index-2099.05.06-000001", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index b8e2ede87..221e42b58 100644 --- a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", body: { query: "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", diff --git 
a/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc new file mode 100644 index 000000000..7818a3f0c --- /dev/null +++ b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + order_stats: { + stats: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index ab0617ea6..40d330c9d 100644 --- a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.esql.asyncQueryGet({ - id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", - body: null, +const response = await client.transport.request({ + method: "GET", + path: "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", }); console.log(response); ---- diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 5f31b1de6..823515f74 100644 --- a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.inference.streamInference({ - task_type: "completion", - inference_id: "openai-completion", +const response = await client.transport.request({ + method: "POST", + path: "/_inference/completion/openai-completion/_stream", body: { input: "What is Elastic?", }, diff --git a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index dcf9e4b2e..2598c7bce 100644 --- a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { realm: "oidc1", state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", diff --git a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index c95b379f3..46cd0a13e 100644 --- a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -3,8 +3,12 @@ [source, js] ---- -const response = await client.esql.asyncQuery({ - format: "json", +const response = await client.transport.request({ + method: "POST", + path: "/_query/async", + querystring: { + format: "json", + }, body: { query: "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", diff --git a/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc similarity index 69% rename from docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc rename to docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc index 9537a8386..3226a57c7 100644 --- 
a/docs/doc_examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc +++ b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc @@ -4,9 +4,11 @@ [source, js] ---- const response = await client.indices.putSettings({ - index: "my-index-000001", + index: "*", settings: { "index.search.slowlog.include.user": true, + "index.search.slowlog.threshold.fetch.warn": "30s", + "index.search.slowlog.threshold.query.warn": "30s", }, }); console.log(response); diff --git a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc index 3dff97a73..30adbe22e 100644 --- a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc +++ b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc @@ -11,7 +11,7 @@ const response = await client.searchApplication.put({ script: { lang: "mustache", source: - '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n \n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', params: { query: "", _es_filters: {}, diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index 26bbeb20a..f5995e6b6 100644 --- a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], }, diff --git a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc similarity index 76% rename from docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc rename to docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc index 0c7b48ea7..afea3d985 100644 --- a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc +++ b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc @@ -12,6 +12,13 @@ const response = await client.search({ fields: ["my_field", "my_field._2gram", "my_field._3gram"], }, }, + highlight: { + fields: { + my_field: { + matched_fields: ["my_field._index_prefix"], + }, + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc new file mode 100644 index 000000000..b2400e39b --- /dev/null +++ b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + prefix: { + full_name: { + value: "ki", + }, + }, + }, + highlight: { + fields: { + full_name: { + matched_fields: ["full_name._index_prefix"], + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc new file mode 100644 index 000000000..047487632 --- /dev/null +++ b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + daily_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + smoothed_revenue: { + moving_fn: { + buckets_path: "daily_revenue", + window: 3, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc b/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc deleted file mode 100644 index b99aa857f..000000000 --- a/docs/doc_examples/74b229a6e020113e5749099451979c89.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: "test-index", - query: { - nested: { - path: "inference_field.inference.chunks", - query: { - knn: { - field: "inference_field.inference.chunks.embeddings", - query_vector_builder: { - text_embedding: { - model_id: "my_inference_id", - model_text: "mountain lake", - }, - }, - }, - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index 107aebead..ebe4fce86 100644 --- a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.textStructure.findMessageStructure({ +const response = await client.transport.request({ + method: "POST", + path: "/_text_structure/find_message_structure", body: { messages: [ "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", diff --git a/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc new file mode 100644 index 000000000..d1fcf443c --- /dev/null +++ b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/ip_location/database/my-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc new file mode 100644 index 000000000..733c366ba --- /dev/null +++ b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + 
calendar_interval: "day", + format: "yyyy-MM-dd", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + unique_customers: { + cardinality: { + field: "customer_id", + }, + }, + avg_basket_size: { + avg: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index 659fc0e47..28fdff4a5 100644 --- a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], role_descriptors: { diff --git a/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc new file mode 100644 index 000000000..aa09492cf --- /dev/null +++ b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + categories: { + terms: { + field: "category.keyword", + size: 5, + order: { + total_revenue: "desc", + }, + }, + aggs: { + total_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + total_items: { + sum: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc new file mode 100644 index 000000000..8d9b9da8b --- /dev/null +++ b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_ingest/ip_location/database/my-database-1", + body: { + name: "GeoIP2-Domain", + maxmind: { + account_id: "1234567", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc new file mode 100644 index 000000000..3f2ffdf6b --- /dev/null +++ b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_ingest/ip_location/database/example-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index 46c6ad610..0cf3aea4d 100644 --- a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.postBehavioralAnalyticsEvent({ - collection_name: "my_analytics_collection", - event_type: 
"search_click", +const response = await client.transport.request({ + method: "POST", + path: "/_application/analytics/my_analytics_collection/event/search_click", body: { session: { id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9", diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 64e6db48e..8e19908d0 100644 --- a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcAuthenticate({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/authenticate", body: { redirect_uri: "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", diff --git a/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc new file mode 100644 index 000000000..1bc8b2cc7 --- /dev/null +++ b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-byte", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "byte", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-byte", + id: 1, + document: { + my_vector: [ + [1, 2, 3], + [4, 5, 6], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc index 6b9a54625..afaf9d7dc 100644 --- a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -3,8 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.renderQuery({ - name: "my_search_application", +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my_search_application/_render_query", body: { params: { query_string: "rock climbing", diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 6ce163623..5186df2ae 100644 --- a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -3,9 +3,9 @@ [source, js] ---- -const response = await client.searchApplication.renderQuery({ - name: "my_search_application", - body: null, +const response = await client.transport.request({ + method: "POST", + path: "/_application/search_application/my_search_application/_render_query", }); console.log(response); ---- diff --git a/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc new file mode 100644 index 000000000..f71aebf61 --- /dev/null +++ b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + avg_order_value: { + avg: { + field: 
"taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index 30687161f..57b7fb69d 100644 --- a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -208,10 +208,13 @@ const response = await client.bulk({ }); console.log(response); -const response1 = await client.textStructure.findFieldStructure({ - index: "test-logs", - field: "message", - body: null, +const response1 = await client.transport.request({ + method: "GET", + path: "/_text_structure/find_field_structure", + querystring: { + index: "test-logs", + field: "message", + }, }); console.log(response1); ---- diff --git a/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc new file mode 100644 index 000000000..446bba938 --- /dev/null +++ b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_orders: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + min_doc_count: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc new file mode 100644 index 000000000..e63a33d34 --- /dev/null +++ b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "_all", + expand_wildcards: "all", + filter_path: "*.settings.index.*.slowlog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index 06b5f58ec..80d974285 100644 --- a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc new file mode 100644 index 000000000..4e074487d --- /dev/null +++ b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + sales_by_category: { + terms: { + field: "category.keyword", + size: 5, + order: { + _count: "desc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc new file mode 100644 index 000000000..b518cae85 --- /dev/null +++ 
b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + cumulative_revenue: { + cumulative_sum: { + buckets_path: "revenue", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index 2a14bb328..abc332dd4 100644 --- a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.bulkUpdateApiKeys({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/api_key/_bulk_update", body: { ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], role_descriptors: {}, diff --git a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index c97a5d54f..d44a7b669 100644 --- a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc +++ b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index 51dea2365..21bcc10b8 100644 --- a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { iss: "http://127.0.0.1:8080", login_hint: "this_is_an_opaque_string", diff --git a/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc new file mode 100644 index 000000000..592744a30 --- /dev/null +++ b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_ingest/ip_location/database/my-database-2", + body: { + name: "standard_location", + ipinfo: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc new file mode 100644 index 000000000..e80e90ffd --- /dev/null +++ b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_ingest/ip_location/database/my-database-id", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc index ff630da8a..1fe5e6b4c 100644 --- a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc +++ b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -3,6 +3,9 @@ [source, js] ---- -const response = await client.security.getSettings(); +const response = await client.transport.request({ + method: "GET", + path: "/_security/settings", +}); console.log(response); ---- diff --git a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc similarity index 92% rename from docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc rename to docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc index da9071e2c..d4a4521d5 100644 --- a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc +++ b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc @@ -7,7 +7,7 @@ const response = await client.inference.put({ task_type: "sparse_embedding", inference_id: "elser_embeddings", inference_config: { - service: "elser", + service: "elasticsearch", service_settings: { num_allocations: 1, num_threads: 1, diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index 5a02c157a..febdc3354 100644 --- a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.security.oidcPrepareAuthentication({ +const response = await client.transport.request({ + method: "POST", + path: "/_security/oidc/prepare", body: { realm: "oidc1", }, diff --git a/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc new file mode 100644 index 000000000..da7018754 --- /dev/null +++ b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-float", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-float", + id: 1, + document: { + my_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc index 92b9b7363..ba52d081d 100644 --- a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc +++ b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.simulate.ingest({ +const response = await client.transport.request({ + method: "POST", + path: "/_ingest/_simulate", body: { docs: [ { diff --git a/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc similarity index 80% rename from docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc rename to docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc index d2f122662..5c1d8b6ed 100644 --- a/docs/doc_examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc +++ b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc @@ -4,9 +4,10 
@@ [source, js] ---- const response = await client.indices.putSettings({ - index: "my-index-000001", + index: "*", settings: { "index.indexing.slowlog.include.user": true, + "index.indexing.slowlog.threshold.index.warn": "30s", }, }); console.log(response); ---- diff --git a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc index 81783cf66..b7fdbd587 100644 --- a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc +++ b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -3,7 +3,9 @@ [source, js] ---- -const response = await client.connector.secretPost({ +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_secret", body: { value: "encoded_api_key", }, diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 2830d6453..31a927b83 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -42,6 +42,7 @@ client.bulk({ ... }) * *Request (object):* ** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. ** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* +** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were executed for each index or create. ** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. @@ -55,6 +56,7 @@ Valid values: `true`, `false`, `wait_for`. ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias. +** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to-be-created). [discrete] === clear_scroll @@ -531,7 +533,24 @@ client.getSource({ id, index }) [discrete] === health_report -Returns the health of the cluster. +Get the cluster health. +Get a report with the health status of an Elasticsearch cluster. +The report contains a list of indicators that compose Elasticsearch functionality. + +Each indicator has a health status of green, unknown, yellow, or red. +The indicator will provide an explanation and metadata describing the reason for its current health status. + +The cluster’s status is controlled by the worst indicator status. + +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result, detailing the functionalities that are negatively affected by the health issue. +Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. + +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. 
+A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. + +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set `verbose` to `false` to disable the more expensive analysis logic. {ref}/health-api.html[Endpoint documentation] [source,ts] ---- @@ -825,7 +844,7 @@ If `true`, the point in time will contain all the shards that are available at t [discrete] === ping Ping the cluster. -Returns whether the cluster is running. +Get information about whether the cluster is running. {ref}/index.html[Endpoint documentation] [source,ts] ---- @@ -1792,6 +1811,11 @@ client.cat.aliases({ ... }) * *Request (object):* ** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== allocation @@ -1810,6 +1834,11 @@ client.cat.allocation({ ... }) * *Request (object):* ** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== component_templates @@ -1831,6 +1860,11 @@ client.cat.componentTemplates({ ... }) * *Request (object):* ** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== count @@ -1948,6 +1982,7 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para ** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. 
** *`pri` (Optional, boolean)*: If `true`, the response only includes information from primary shards. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== master Returns information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-master.html[Endpoint documentation] [source,ts] ---- -client.cat.master() +client.cat.master({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== ml_data_frame_analytics @@ -1986,7 +2030,7 @@ client.cat.mlDataFrameAnalytics({ ... }) ** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. ** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, string | -1 | 0)*: Unit used to display time values. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== ml_datafeeds @@ -2089,6 +2133,7 @@ If `false`, the API returns a 404 status code when there are no matches or only ** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response. ** *`from` (Optional, number)*: Skips the specified number of trained models. ** *`size` (Optional, number)*: The maximum number of trained models to display. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. 
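For example, the `time` display unit documented above can be passed straight through from the JavaScript client. A minimal sketch, assuming a v8-style client; the `h` column selection is an illustrative assumption, not part of this changeset:

[source, js]
----
// Minimal sketch: list trained models with time values rendered in
// milliseconds. `time` is the display-unit parameter documented above;
// the columns chosen via `h` are illustrative assumptions.
const response = await client.cat.mlTrainedModels({
  h: ["id", "heap_size", "create_time"],
  time: "ms",
});
console.log(response);
----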
[discrete] ==== nodeattrs Returns information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-nodeattrs.html[Endpoint documentation] [source,ts] ---- -client.cat.nodeattrs() +client.cat.nodeattrs({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== nodes @@ -2120,6 +2174,8 @@ client.cat.nodes({ ... }) ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. ** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== pending_tasks Returns cluster-level changes that have not yet been executed. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-pending-tasks.html[Endpoint documentation] [source,ts] ---- -client.cat.pendingTasks() +client.cat.pendingTasks({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== plugins Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. {ref}/cat-plugins.html[Endpoint documentation] [source,ts] ---- -client.cat.plugins() +client.cat.plugins({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`include_bootstrap` (Optional, boolean)*: Include bootstrap plugins in the response. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== recovery @@ -2167,6 +2243,7 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para ** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. 
+** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== repositories @@ -2176,9 +2253,18 @@ IMPORTANT: cat APIs are only intended for human consumption using the command li {ref}/cat-repositories.html[Endpoint documentation] [source,ts] ---- -client.cat.repositories() +client.cat.repositories({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== segments @@ -2200,6 +2286,11 @@ client.cat.segments({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== shards @@ -2221,6 +2312,8 @@ client.cat.shards({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. ** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== snapshots @@ -2243,6 +2336,8 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. [discrete] ==== tasks @@ -2261,8 +2356,12 @@ client.cat.tasks({ ... }) * *Request (object):* ** *`actions` (Optional, string[])*: The task action names, which are used to limit the response. ** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. -** *`node_id` (Optional, string[])*: Unique node identifiers, which are used to limit the response. +** *`nodes` (Optional, string[])*: Unique node identifiers, which are used to limit the response. ** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. [discrete] ==== templates @@ -2282,6 +2381,11 @@ client.cat.templates({ ... }) * *Request (object):* ** *`name` (Optional, string)*: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== thread_pool @@ -2302,6 +2406,11 @@ client.cat.threadPool({ ... }) ** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request. Accepts wildcard expressions. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false`, the list of selected nodes is computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. [discrete] ==== transforms @@ -2337,7 +2446,8 @@ If `false`, the request returns a 404 status code when there are no matches or o === ccr [discrete] ==== delete_auto_follow_pattern -Deletes auto-follow patterns. +Delete auto-follow patterns. +Delete a collection of cross-cluster replication auto-follow patterns. {ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] ---- client.ccr.deleteAutoFollowPattern({ name }) @@ -2353,7 +2463,9 @@ [discrete] ==== follow -Creates a new follower index configured to follow the referenced leader index. +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. {ref}/ccr-put-follow.html[Endpoint documentation] [source,ts] ---- client.ccr.follow({ index }) @@ -2382,7 +2494,9 @@ [discrete] ==== follow_info -Retrieves information about all follower indices, including parameters and status for each follower index +Get follower information. +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. {ref}/ccr-get-follow-info.html[Endpoint documentation] [source,ts] ---- client.ccr.followInfo({ index }) @@ -2398,7 +2512,9 @@ [discrete] ==== follow_stats -Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. +Get follower stats. +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. {ref}/ccr-get-follow-stats.html[Endpoint documentation] [source,ts] ---- client.ccr.followStats({ index }) @@ -2414,7 +2530,18 @@ [discrete] ==== forget_follower -Removes the follower retention leases from the leader.
+Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. + +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. +However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. +While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. + +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. {ref}/ccr-post-forget-follower.html[Endpoint documentation] [source,ts] @@ -2434,7 +2561,8 @@ client.ccr.forgetFollower({ index }) [discrete] ==== get_auto_follow_pattern -Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. +Get auto-follow patterns. +Get cross-cluster replication auto-follow patterns. {ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2450,7 +2578,14 @@ client.ccr.getAutoFollowPattern({ ... }) [discrete] ==== pause_auto_follow_pattern -Pauses an auto-follow pattern +Pause an auto-follow pattern. +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. {ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2466,7 +2601,11 @@ client.ccr.pauseAutoFollowPattern({ name }) [discrete] ==== pause_follow -Pauses a follower index. The follower index will not fetch any additional operations from the leader index. +Pause a follower. +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. {ref}/ccr-post-pause-follow.html[Endpoint documentation] [source,ts] @@ -2482,7 +2621,13 @@ client.ccr.pauseFollow({ index }) [discrete] ==== put_auto_follow_pattern -Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. +Create or update auto-follow patterns. 
+Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. + +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. {ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2513,7 +2658,10 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) [discrete] ==== resume_auto_follow_pattern -Resumes an auto-follow pattern that has been paused +Resume an auto-follow pattern. +Resume a cross-cluster replication auto-follow pattern that was paused. +The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. +Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. {ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2529,7 +2677,11 @@ client.ccr.resumeAutoFollowPattern({ name }) [discrete] ==== resume_follow -Resumes a follower index that has been paused +Resume a follower. +Resume a cross-cluster replication follower index that was paused. +The follower index could have been paused with the pause follower API. +Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. +When this API returns, the follower index will resume fetching operations from the leader index. {ref}/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] @@ -2555,7 +2707,8 @@ client.ccr.resumeFollow({ index }) [discrete] ==== stats -Gets all stats related to cross-cluster replication. +Get cross-cluster replication stats. +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. {ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] @@ -2566,7 +2719,12 @@ client.ccr.stats() [discrete] ==== unfollow -Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +Unfollow an index. +Convert a cross-cluster replication follower index to a regular index. +The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. + +NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. {ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] @@ -2584,7 +2742,11 @@ client.ccr.unfollow({ index }) === cluster [discrete] ==== allocation_explain -Provides explanations for shard allocations in the cluster. +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. 
+This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. {ref}/cluster-allocation-explain.html[Endpoint documentation] [source,ts] @@ -2627,7 +2789,8 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_voting_config_exclusions -Clears cluster voting config exclusions. +Clear cluster voting config exclusions. +Remove master-eligible nodes from the voting configuration exclusion list. {ref}/voting-config-exclusions.html[Endpoint documentation] [source,ts] @@ -2695,7 +2858,7 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_settings -Returns cluster-wide settings. +Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. {ref}/cluster-get-settings.html[Endpoint documentation] @@ -2717,8 +2880,16 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== health -The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. -The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. +Get the cluster health status. +You can also use the API to get the health status of only specified data streams and indices. +For data streams, the API retrieves the health status of the stream’s backing indices. + +The cluster health status is: green, yellow or red. +On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. +The index level status is controlled by the worst shard status. + +One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. +The cluster status is controlled by the worst index status. {ref}/cluster-health.html[Endpoint documentation] [source,ts] @@ -2762,9 +2933,11 @@ client.cluster.info({ target }) [discrete] ==== pending_tasks -Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. +Get the pending cluster tasks. +Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. + NOTE: This API returns a list of any pending updates to the cluster state. -These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. +These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. 
However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API. {ref}/cluster-pending.html[Endpoint documentation] [source,ts] ---- @@ -2784,7 +2957,24 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== post_voting_config_exclusions -Updates the cluster voting config exclusions by node ids or node names. +Update voting configuration exclusions. +Update the cluster voting config exclusions by node IDs or node names. +By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. +If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. +The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. + +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. +If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. +In that case, you may safely retry the call. + +NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. {ref}/voting-config-exclusions.html[Endpoint documentation] [source,ts] ---- @@ -2855,7 +3045,24 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== put_settings -Updates the cluster settings. +Update the cluster settings. +Configure and update dynamic settings on a running cluster. +You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. + +Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. +You can also reset transient or persistent settings by assigning them a null value. + +If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.
+For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. +However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. + +TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. +If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. +Only use `elasticsearch.yml` for static cluster settings and node settings. +The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. + +WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. +If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. {ref}/cluster-update-settings.html[Endpoint documentation] [source,ts] @@ -2875,9 +3082,9 @@ client.cluster.putSettings({ ... }) [discrete] ==== remote_info -The cluster remote info API allows you to retrieve all of the configured -remote cluster information. It returns connection and endpoint information -keyed by the configured remote cluster alias. +Get remote cluster information. +Get all of the configured remote cluster information. +This API returns connection and endpoint information keyed by the configured remote cluster alias. {ref}/cluster-remote-info.html[Endpoint documentation] [source,ts] @@ -2888,7 +3095,20 @@ client.cluster.remoteInfo() [discrete] ==== reroute -Allows to manually change the allocation of individual shards in the cluster. +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. {ref}/cluster-reroute.html[Endpoint documentation] [source,ts] @@ -2901,8 +3121,9 @@ client.cluster.reroute({ ... }) * *Request (object):* ** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform. 
-** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation only and returns the resulting state. -** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot be executed. +** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation. +It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot run. ** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics. ** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -2910,7 +3131,25 @@ client.cluster.reroute({ ... }) [discrete] ==== state -Returns a comprehensive information about the state of the cluster. +Get the cluster state. +Get comprehensive information about the state of the cluster. + +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. + +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. {ref}/cluster-state.html[Endpoint documentation] [source,ts] @@ -2935,8 +3174,8 @@ client.cluster.state({ ... }) [discrete] ==== stats -Returns cluster statistics. -It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). +Get cluster statistics. +Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). 
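+As a quick usage sketch of the stats API, hedged in that the response field paths shown come from the documented cluster stats response shape rather than from this document:
+
+[source,ts]
+----
+// Fetch cluster-wide statistics and log a few headline metrics.
+const response = await client.cluster.stats();
+console.log(response.indices.count, response.nodes.count.total);
+----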
{ref}/cluster-stats.html[Endpoint documentation] [source,ts] @@ -3105,23 +3344,48 @@ client.connector.syncJobCancel({ connector_sync_job_id }) [discrete] ==== sync_job_check_in -Checks in a connector sync job (refreshes 'last_seen'). +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/check-in-connector-sync-job-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobCheckIn() +client.connector.syncJobCheckIn({ connector_sync_job_id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be checked in. [discrete] ==== sync_job_claim -Claims a connector sync job. +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. [source,ts] ---- -client.connector.syncJobClaim() +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`worker_hostname` (string)*: The host name of the current system that will run the job. +** *`sync_cursor` (Optional, User-defined value)*: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. [discrete] ==== sync_job_delete @@ -3144,14 +3408,24 @@ client.connector.syncJobDelete({ connector_sync_job_id }) [discrete] ==== sync_job_error -Sets an error for a connector sync job. +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/set-connector-sync-job-error-api.html[Endpoint documentation] [source,ts] ---- -client.connector.syncJobError() +client.connector.syncJobError({ connector_sync_job_id, error }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier for the connector sync job. +** *`error` (string)*: The error for the connector sync job error field. [discrete] ==== sync_job_get @@ -3306,14 +3580,33 @@ client.connector.updateError({ connector_id, error }) [discrete] ==== update_features -Updates the connector features in the connector document. +Update the connector features. +Update the connector features in the connector document. 
+This API can be used to control the following aspects of a connector: + +* document-level security +* incremental syncs +* advanced sync rules +* basic sync rules + +Normally, the running connector service automatically manages these features. +However, you can use this API to override the default behavior. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. {ref}/update-connector-features-api.html[Endpoint documentation] [source,ts] ---- -client.connector.updateFeatures() +client.connector.updateFeatures({ connector_id, features }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated. +** *`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })* [discrete] ==== update_filtering @@ -3781,7 +4074,16 @@ Defaults to `false`. If `true` then the response will include an extra section u === features [discrete] ==== get_features -Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. + +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. {ref}/get-features-api.html[Endpoint documentation] [source,ts] @@ -3792,7 +4094,23 @@ client.features.getFeatures() [discrete] ==== reset_features -Resets the internal state of features, usually by deleting system indices +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. + +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. + +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. +This deletes all state information stored in system indices. + +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. + +Note that select features might provide a way to reset particular system indices. +Using this API resets all features, both those that are built-in and implemented as plugins. + +To list the features that will be affected, use the get features API. + +IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] @@ -3999,7 +4317,8 @@ Defaults to no timeout. 
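+As an illustration of the feature-state workflow described above, here is a minimal sketch: it lists the available feature states, then references one when creating a snapshot. The repository name, snapshot name, and index pattern are illustrative placeholders, not values from this document.
+
+[source,ts]
+----
+// List the feature states that can be included in snapshots.
+const features = await client.features.getFeatures();
+console.log(features.features.map((feature) => feature.name));
+
+// Reference a feature state when creating a snapshot.
+// "my_repository", "snapshot_1", and "my-index-*" are placeholders.
+const snapshot = await client.snapshot.create({
+  repository: "my_repository",
+  snapshot: "snapshot_1",
+  indices: "my-index-*",
+  feature_states: ["security"],
+});
+console.log(snapshot);
+----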
=== ilm [discrete] ==== delete_lifecycle -Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. +Delete a lifecycle policy. +You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. {ref}/ilm-delete-lifecycle.html[Endpoint documentation] [source,ts] @@ -4017,7 +4336,11 @@ client.ilm.deleteLifecycle({ policy }) [discrete] ==== explain_lifecycle -Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. +Explain the lifecycle state. +Get the current lifecycle status for one or more indices. +For data streams, the API retrieves the current lifecycle status for the stream's backing indices. + +The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. {ref}/ilm-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4038,7 +4361,7 @@ To target all data streams and indices, use `*` or `_all`. [discrete] ==== get_lifecycle -Retrieves a lifecycle policy. +Get lifecycle policies. {ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] @@ -4056,7 +4379,8 @@ client.ilm.getLifecycle({ ... }) [discrete] ==== get_status -Retrieves the current index lifecycle management (ILM) status. +Get the ILM status. +Get the current index lifecycle management status. {ref}/ilm-get-status.html[Endpoint documentation] [source,ts] @@ -4067,10 +4391,21 @@ client.ilm.getStatus() [discrete] ==== migrate_to_data_tiers -Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and -attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: + +1. Stop setting the custom hot attribute on new indices. +2. Remove custom allocation settings from existing ILM policies. +3. Replace custom allocation settings from existing indices with the corresponding tier preference. + +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. + {ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] ---- @@ -4088,7 +4423,20 @@ This provides a way to retrieve the indices and ILM policies that need to be mig [discrete] ==== move_to_step -Manually moves an index into the specified step and executes that step. +Move to a lifecycle step. +Manually move an index into a specific step in the lifecycle policy and run that step. + +WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. 
This is a potentially destructive action and should be considered an expert-level API. + +You must specify both the current step and the step to be executed in the body of the request. +The request will fail if the current step does not match the step currently running for the index. +This is to prevent the index from being moved from an unexpected step into the next step. + +When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. +If only the phase is specified, the index will move to the first step of the first action in the target phase. +If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. +Only actions specified in the ILM policy are considered valid. +An index cannot move to a step that is not part of its policy. {ref}/ilm-move-to-step.html[Endpoint documentation] [source,ts] ---- @@ -4106,7 +4454,10 @@ client.ilm.moveToStep({ index, current_step, next_step }) [discrete] ==== put_lifecycle -Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. +Create or update a lifecycle policy. +If the specified policy exists, it is replaced and the policy version is incremented. + +NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions. {ref}/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] ---- @@ -4124,7 +4475,9 @@ client.ilm.putLifecycle({ policy }) [discrete] ==== remove_policy -Removes the assigned lifecycle policy and stops managing the specified index +Remove policies from an index. +Remove the assigned lifecycle policies from an index or a data stream's backing indices. +It also stops managing the indices. {ref}/ilm-remove-policy.html[Endpoint documentation] [source,ts] ---- @@ -4140,7 +4493,10 @@ client.ilm.removePolicy({ index }) [discrete] ==== retry -Retries executing the policy for an index that is in the ERROR step. +Retry a policy. +Retry running the lifecycle policy for an index that is in the ERROR step. +The API sets the policy back to the step where the error occurred and runs the step. +Use the explain lifecycle state API to determine whether an index is in the ERROR step. {ref}/ilm-retry-policy.html[Endpoint documentation] [source,ts] ---- @@ -4156,7 +4512,10 @@ client.ilm.retry({ index }) [discrete] ==== start -Start the index lifecycle management (ILM) plugin. +Start the ILM plugin. +Start the index lifecycle management plugin if it is currently stopped. +ILM is started automatically when the cluster is formed. +Restarting ILM is necessary only when it has been stopped using the stop ILM API. {ref}/ilm-start.html[Endpoint documentation] [source,ts] ---- @@ -4173,7 +4532,12 @@ client.ilm.start({ ... }) [discrete] ==== stop -Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin +Stop the ILM plugin. +Halt all lifecycle management operations and stop the index lifecycle management plugin. +This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. + +The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. +Use the get ILM status API to check whether ILM is running.
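+A minimal sketch of that stop-and-verify workflow (the one-second polling interval is arbitrary):
+
+[source,ts]
+----
+// Ask ILM to stop, then poll until the reported operation mode is STOPPED.
+const stopResponse = await client.ilm.stop();
+console.log(stopResponse);
+
+let status = await client.ilm.getStatus();
+while (status.operation_mode !== "STOPPED") {
+  await new Promise((resolve) => setTimeout(resolve, 1000));
+  status = await client.ilm.getStatus();
+}
+console.log(status.operation_mode);
+----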
{ref}/ilm-stop.html[Endpoint documentation] [source,ts] @@ -4247,8 +4611,9 @@ If an array of strings is provided, it is analyzed as a multi-value field. [discrete] ==== clear_cache -Clears the caches of one or more indices. -For data streams, the API clears the caches of the stream’s backing indices. +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] @@ -4278,7 +4643,29 @@ Use the `fields` parameter to clear the cache of specific fields only. [discrete] ==== clone -Clones an existing index. +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be cloned if they meet the following requirements: + +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] @@ -4303,7 +4690,24 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== close -Closes an index. +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. + +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. + +By default, you must explicitly name the indices you are opening or closing. 
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. + +Closed indices consume a significant amount of disk space, which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. {ref}/indices-close.html[Endpoint documentation] [source,ts] ---- @@ -4543,7 +4947,10 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== disk_usage -Analyzes the disk usage of each field of an index or data stream. +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. +The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] ---- @@ -4571,7 +4978,14 @@ To use the API, this parameter must be set to `true`. [discrete] ==== downsample -Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). {ref}/indices-downsample-data-stream.html[Endpoint documentation] [source,ts] ---- @@ -4682,7 +5096,7 @@ client.indices.existsTemplate({ name }) [discrete] ==== explain_data_lifecycle Get the status for a data stream lifecycle. -Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. {ref}/data-streams-explain-lifecycle.html[Endpoint documentation] [source,ts] ---- @@ -4700,7 +5114,10 @@ client.indices.explainDataLifecycle({ index }) [discrete] ==== field_usage_stats -Returns field usage information for each shard and field of an index. +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] ---- @@ -4730,7 +5147,17 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== flush -Flushes one or more data streams or indices.
+Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. + +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. {ref}/indices-flush.html[Endpoint documentation] [source,ts] @@ -4758,7 +5185,19 @@ If `false`, Elasticsearch returns an error if you request a flush when another f [discrete] ==== forcemerge -Performs the force merge operation on one or more indices. +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). +When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". +These soft-deleted documents are automatically cleaned up during regular segment merges. +But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. +So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. +If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. {ref}/indices-forcemerge.html[Endpoint documentation] [source,ts] @@ -5109,7 +5548,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== promote_data_stream -Promotes a data stream from a replicated data stream managed by CCR to a regular data stream +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. 
+In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -5332,6 +5781,16 @@ error. ==== put_template Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +Composable templates always take precedence over legacy templates. +If no composable template matches a new index, matching legacy templates are applied according to their order. + +Index templates are only applied during index creation. +Changes to index templates do not affect existing indices. +Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. {ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] @@ -5363,8 +5822,27 @@ received before the timeout expires, the request fails and returns an error. [discrete] ==== recovery -Returns information about ongoing and completed shard recoveries for one or more indices. -For data streams, the API returns information for the stream’s backing indices. +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. + +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. + +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. + +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. + +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. +It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. 
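+A minimal usage sketch, assuming the standard `detailed` and `active_only` query parameters of the recovery API (the index name is a placeholder):
+
+[source,ts]
+----
+// Get detailed information about ongoing and completed shard recoveries.
+const response = await client.indices.recovery({
+  index: "my-index-000001",
+  detailed: true,
+  active_only: false,
+});
+console.log(response);
+----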
{ref}/indices-recovery.html[Endpoint documentation] [source,ts] @@ -5411,7 +5889,20 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== reload_search_analyzers -Reloads an index's search analyzers and their resources. +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. + +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. {ref}/indices-reload-analyzers.html[Endpoint documentation] [source,ts] @@ -5430,10 +5921,23 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster -Resolves the specified index expressions to return information about each cluster, including -the local cluster, if included. +Resolve the cluster. +Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. +This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. + +You use the same index expression with this endpoint as you would for cross-cluster search. +Index and cluster exclusions are also supported with this endpoint. + +For each cluster in the index expression, information is returned about: + +* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. +* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. +* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. +* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). +* Cluster version information, including the Elasticsearch server version. + {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] ---- @@ -5523,8 +6027,9 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== segments -Returns low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the stream’s backing indices. +Get index segments. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream's backing indices. 
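+A minimal usage sketch (the index name is a placeholder):
+
+[source,ts]
+----
+// Get low-level Lucene segment information for one index's shards.
+const response = await client.indices.segments({
+  index: "my-index-000001",
+});
+console.log(response.indices);
+----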
{ref}/indices-segments.html[Endpoint documentation] [source,ts] ---- @@ -5550,8 +6055,18 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== shard_stores -Retrieves store information about replica shards in one or more indices. -For data streams, the API retrieves store information for the stream’s backing indices. +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. + +The index shard stores API returns the following information: + +* The node on which each replica shard exists. +* The allocation ID for each replica shard. +* A unique ID for each replica shard. +* Any errors encountered while opening the shard index or from an earlier failure. + +By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. {ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] ---- @@ -5574,7 +6089,38 @@ this argument determines whether wildcard expressions match hidden data streams. [discrete] ==== shrink -Shrinks an existing index into a new index with fewer primary shards. +Shrink an index. +Shrink an index into a new index with fewer primary shards. + +Before you can shrink an index: + +* The index must be read-only. +* A copy of every shard in the index must reside on the same node. +* The index must have a green health status. + +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. + +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example, an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. +If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. + +The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. + +A shrink operation: + +* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. +* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. +* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + +IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have more primary shards than the target index. +* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
+* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
+* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.

{ref}/indices-shrink-index.html[Endpoint documentation]
[source,ts]
@@ -5664,7 +6210,30 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning.

[discrete]
==== split
-Splits an existing index into a new index with more primary shards.
+Split an index.
+Split an index into a new index with more primary shards.
+
+Before you can split an index:
+
+* The index must be read-only.
+* The cluster health status must be green.
+
+The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
+The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
+For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
+
+A split operation:
+
+* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
+* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
+* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.
+* Recovers the target index as though it were a closed index which had just been re-opened.
+
+IMPORTANT: Indices can only be split if they satisfy the following requirements:
+
+* The target index must not exist.
+* The source index must have fewer primary shards than the target index.
+* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
+* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.

{ref}/indices-split-index.html[Endpoint documentation]
[source,ts]
@@ -5689,8 +6258,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind

[discrete]
==== stats
-Returns statistics for one or more indices.
-For data streams, the API retrieves statistics for the stream’s backing indices.
+Get index statistics.
+For data streams, the API retrieves statistics for the stream's backing indices.
+
+By default, the returned statistics are index-level with `primaries` and `total` aggregations.
+`primaries` are the values for only the primary shards.
+`total` are the accumulated values for both primary and replica shards.
+
+To get shard-level statistics, set the `level` parameter to `shards`.
+
+NOTE: When moving to another node, the shard-level statistics for a shard are cleared.
+Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.

{ref}/indices-stats.html[Endpoint documentation]
[source,ts]
@@ -5718,7 +6296,8 @@ such as `open,hidden`.

[discrete]
==== unfreeze
-Unfreezes an index.
+Unfreeze an index.
+When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
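+
+A minimal sketch (assuming a configured `client` and a frozen index named `my-index-000001`; note that index freezing is deprecated in recent versions):
+
+[source,ts]
+----
+// Unfreeze a previously frozen index so it can be written to again.
+const response = await client.indices.unfreeze({
+  index: "my-index-000001",
+});
+console.log(response);
+----
+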
{ref}/unfreeze-index-api.html[Endpoint documentation] [source,ts] @@ -5867,7 +6446,16 @@ Not required for other tasks. [discrete] ==== put -Create an inference endpoint +Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. {ref}/put-inference-api.html[Endpoint documentation] [source,ts] @@ -5916,14 +6504,22 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== delete_ip_location_database -Deletes an ip location database configuration +Deletes an IP location database configuration. {ref}/delete-ip-location-database-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.deleteIpLocationDatabase() +client.ingest.deleteIpLocationDatabase({ id }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string | string[])*: A list of IP location database configurations to delete +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_pipeline @@ -5982,14 +6578,23 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== get_ip_location_database -Returns the specified ip location database configuration +Returns information about one or more IP location database configurations. {ref}/get-ip-location-database-api.html[Endpoint documentation] [source,ts] ---- -client.ingest.getIpLocationDatabase() +client.ingest.getIpLocationDatabase({ ... }) ---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. 
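+
+A minimal usage sketch (assuming a configured `client`; the ID `my-ip-database` is a placeholder):
+
+[source,ts]
+----
+// Fetch a single IP location database configuration by ID.
+// Omit `id` (or pass "*") to retrieve all configurations.
+const response = await client.ingest.getIpLocationDatabase({
+  id: "my-ip-database",
+});
+console.log(response);
+----
+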
[discrete]
==== get_pipeline
@@ -6053,14 +6658,23 @@ If no response is received before the timeout expires, the request fails and ret

[discrete]
==== put_ip_location_database
-Puts the configuration for a ip location database to be downloaded
+Create or update an IP location database configuration.

{ref}/put-ip-location-database-api.html[Endpoint documentation]
[source,ts]
----
-client.ingest.putIpLocationDatabase()
+client.ingest.putIpLocationDatabase({ id })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: ID of the database configuration to create or update.
+** *`configuration` (Optional, { name, maxmind, ipinfo })*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

[discrete]
==== put_pipeline
@@ -6117,7 +6731,10 @@ If you specify both this and the request path parameter, the API only uses the r

=== license
[discrete]
==== delete
-Deletes licensing information for the cluster
+Delete the license.
+When the license expires, your subscription level reverts to Basic.
+
+If the operator privileges feature is enabled, only operator users can use this API.

{ref}/delete-license.html[Endpoint documentation]
[source,ts]
----
client.license.delete()
----

[discrete]
==== get
Get license information.
-Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires.
-For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions).
+Get information about your Elastic license including its type, its status, when it was issued, and when it expires.
+
+NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.
+If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

{ref}/get-license.html[Endpoint documentation]
[source,ts]
----
@@ -6148,7 +6767,7 @@ This parameter is deprecated and will always be set to true in 8.x.

[discrete]
==== get_basic_status
-Retrieves information about the status of the basic license.
+Get the basic license status.

{ref}/get-basic-status.html[Endpoint documentation]
[source,ts]
----
client.license.getBasicStatus()
----

[discrete]
==== get_trial_status
-Retrieves information about the status of the trial license.
+Get the trial status.

{ref}/get-trial-status.html[Endpoint documentation]
[source,ts]
----
client.license.getTrialStatus()
----

[discrete]
==== post
-Updates the license for the cluster.
+Update the license.
+You can update your license at runtime without shutting down your nodes.
+License updates take effect immediately.
+If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.
+You must then re-submit the API request with the `acknowledge` parameter set to `true`.
+
+NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.
+If the operator privileges feature is enabled, only operator users can use this API.
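+
+For example, the following sketch installs a license read from a local file and pre-acknowledges any feature downgrades (the file path is a placeholder; a configured `client` is assumed):
+
+[source,ts]
+----
+import { readFile } from "node:fs/promises";
+
+// `acknowledge: true` avoids having to re-submit the request if the new
+// license supports fewer features than the current one.
+const licenseFile = JSON.parse(await readFile("./license.json", "utf8"));
+const response = await client.license.post({
+  license: licenseFile.license,
+  acknowledge: true,
+});
+console.log(response.license_status);
+----
+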
{ref}/update-license.html[Endpoint documentation] [source,ts] @@ -6188,8 +6814,15 @@ client.license.post({ ... }) [discrete] ==== post_start_basic -The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. -To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +Start a basic license. +Start an indefinite basic license, which gives access to all the basic features. + +NOTE: In order to start a basic license, you must not currently have a basic license. + +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. + +To check the status of your basic license, use the get basic license API. {ref}/start-basic.html[Endpoint documentation] [source,ts] @@ -6205,7 +6838,13 @@ client.license.postStartBasic({ ... }) [discrete] ==== post_start_trial -The start trial API enables you to start a 30-day trial, which gives access to all subscription features. +Start a trial. +Start a 30-day trial, which gives access to all subscription features. + +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + +To check the status of your trial, use the get trial status API. {ref}/start-trial.html[Endpoint documentation] [source,ts] @@ -6224,7 +6863,9 @@ client.license.postStartTrial({ ... }) === logstash [discrete] ==== delete_pipeline -Deletes a pipeline used for Logstash Central Management. +Delete a Logstash pipeline. + +Delete a pipeline that is used for Logstash Central Management. {ref}/logstash-api-delete-pipeline.html[Endpoint documentation] [source,ts] @@ -6236,11 +6877,13 @@ client.logstash.deletePipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the pipeline. +** *`id` (string)*: An identifier for the pipeline. [discrete] ==== get_pipeline -Retrieves pipelines used for Logstash Central Management. +Get Logstash pipelines. + +Get pipelines that are used for Logstash Central Management. {ref}/logstash-api-get-pipeline.html[Endpoint documentation] [source,ts] @@ -6252,11 +6895,14 @@ client.logstash.getPipeline({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string | string[])*: List of pipeline identifiers. +** *`id` (Optional, string | string[])*: A list of pipeline identifiers. [discrete] ==== put_pipeline -Creates or updates a pipeline used for Logstash Central Management. +Create or update a Logstash pipeline. + +Create a pipeline that is used for Logstash Central Management. +If the specified pipeline exists, it is replaced. {ref}/logstash-api-put-pipeline.html[Endpoint documentation] [source,ts] @@ -6268,14 +6914,17 @@ client.logstash.putPipeline({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the pipeline. +** *`id` (string)*: An identifier for the pipeline. 
** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*

[discrete]
=== migration
[discrete]
==== deprecations
-Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

{ref}/migration-api-deprecation.html[Endpoint documentation]
[source,ts]
@@ -6291,7 +6940,12 @@ client.migration.deprecations({ ... })

[discrete]
==== get_feature_upgrade_status
-Find out whether system features need to be upgraded or not
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.

{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
[source,ts]
@@ -6302,7 +6956,13 @@ client.migration.getFeatureUpgradeStatus()

[discrete]
==== post_feature_upgrade
-Begin upgrades for system features
+Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+This API starts the automatic migration process.
+
+Some functionality might be temporarily unavailable during the migration process.
+
+TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
[source,ts]
@@ -6455,7 +7115,7 @@ Deletes all job results, model snapshots and forecast data that have exceeded
their retention days period. Machine learning state documents that are not
associated with any job are also deleted.
You can limit the request to a single or set of anomaly detection jobs by
-using a job identifier, a group name, a comma-separated list of jobs, or a
+using a job identifier, a group name, a list of jobs, or a
wildcard expression. You can delete expired data for all anomaly detection
jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
@@ -6866,7 +7526,7 @@ This parameter has the `from` and `size` properties.
==== get_data_frame_analytics
Get data frame analytics job configuration info.
You can get information for multiple data frame analytics jobs in a single
-API request by using a comma-separated list of data frame analytics jobs or a
+API request by using a list of data frame analytics jobs or a
wildcard expression.

{ref}/get-dfanalytics.html[Endpoint documentation]
@@ -6935,7 +7595,7 @@ there are no matches or only partial matches.
==== get_datafeed_stats
Get datafeeds usage info.
You can get statistics for multiple datafeeds in a single API request by
-using a comma-separated list of datafeeds or a wildcard expression. You can
+using a list of datafeeds or a wildcard expression. You can
get statistics for all datafeeds by using `_all`, by specifying `*` as the
`<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only
information you receive is the `datafeed_id` and the `state`.
@@ -6969,7 +7629,7 @@ partial matches.
If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches.

[discrete]
==== get_datafeeds
Get datafeeds configuration info.
You can get information for multiple datafeeds in a single API request by
-using a comma-separated list of datafeeds or a wildcard expression. You can
+using a list of datafeeds or a wildcard expression. You can
get information for all datafeeds by using `_all`, by specifying `*` as the
`<feed_id>`, or by omitting the `<feed_id>`.
This API returns a maximum of 10,000 datafeeds.
@@ -7088,7 +7748,7 @@ code when there are no matches or only partial matches.
==== get_jobs
Get anomaly detection jobs configuration info.
You can get information for multiple anomaly detection jobs in a single API
-request by using a group name, a comma-separated list of jobs, or a wildcard
+request by using a group name, a list of jobs, or a wildcard
expression. You can get information for all anomaly detection jobs by using
`_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
@@ -7314,6 +7974,7 @@ be retrieved and then added to another cluster.
** *`from` (Optional, number)*: Skips the specified number of models.
** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response
body.
+** *`include_model_definition` (Optional, boolean)*: This parameter is deprecated. Use `include=definition` instead.
** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or
none. When supplied, only trained models that contain all the supplied
tags are returned.
@@ -7323,7 +7984,7 @@
==== get_trained_models_stats
Get trained models usage info.
You can get usage information for multiple trained
-models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
+models in a single API request by using a list of model IDs or a wildcard expression.

{ref}/get-trained-models-stats.html[Endpoint documentation]
[source,ts]
@@ -7432,7 +8093,7 @@ client.ml.postCalendarEvents({ calendar_id, events })

Send data to an anomaly detection job for analysis.

IMPORTANT: For each job, data can be accepted from only a single connection at a time.
-It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
+It is not currently possible to post data to multiple jobs using wildcards or a list.

{ref}/ml-post-data.html[Endpoint documentation]
[source,ts]
@@ -7603,6 +8264,7 @@ model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.
threads may decrease the time necessary to complete the analysis at the cost
of using more CPU. Note that the process may use additional threads for
operational functionality other than the analysis itself.
+** *`_meta` (Optional, Record)*
** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for
analytical processing. If your `elasticsearch.yml` file contains an
`xpack.ml.max_model_memory_limit` setting, an error occurs when you try
@@ -7733,6 +8395,18 @@ client.ml.putJob({ job_id, analysis_config, data_description })

** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen.
The default value is the longer of 30 days or 100 bucket spans.
** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. ** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. [discrete] ==== put_trained_model @@ -8308,7 +8982,7 @@ bucket result. If this property has a non-null value, once per day at than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. ** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`detectors` (Optional, { by_field_name, custom_rules, detector_description, detector_index, exclude_frequent, field_name, function, over_field_name, partition_field_name, use_null }[])*: An array of detector update objects. +** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects. ** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. [discrete] @@ -8384,33 +9058,12 @@ client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) Otherwise, it responds as soon as the upgrade task is assigned to a node. ** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. -[discrete] -=== monitoring -[discrete] -==== bulk -Used by the monitoring features to send monitoring data. 
- -{ref}/monitor-elasticsearch-cluster.html[Endpoint documentation] -[source,ts] ----- -client.monitoring.bulk({ system_id, system_api_version, interval }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`system_id` (string)*: Identifier of the monitored system -** *`system_api_version` (string)* -** *`interval` (string | -1 | 0)*: Collection interval (e.g., '10s' or '10000ms') of the payload -** *`type` (Optional, string)*: Default document type for items which don't provide one -** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* - [discrete] === nodes [discrete] ==== clear_repositories_metering_archive -You can use this API to clear the archived repositories metering information in the cluster. +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. {ref}/clear-repositories-metering-archive-api.html[Endpoint documentation] [source,ts] @@ -8428,10 +9081,10 @@ All the nodes selective options are explained [here](https://www.elastic.co/guid [discrete] ==== get_repositories_metering_info -You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. -This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the -information needed to compute aggregations over a period of time. Additionally, the information exposed by this -API is volatile, meaning that it won’t be present after node restarts. +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. {ref}/get-repositories-metering-api.html[Endpoint documentation] [source,ts] @@ -8448,8 +9101,9 @@ All the nodes selective options are explained [here](https://www.elastic.co/guid [discrete] ==== hot_threads -This API yields a breakdown of the hot threads on each selected node in the cluster. -The output is plain text with a breakdown of each node’s top hot threads. +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. {ref}/cluster-nodes-hot-threads.html[Endpoint documentation] [source,ts] @@ -8477,7 +9131,8 @@ before the timeout expires, the request fails and returns an error. [discrete] ==== info -Returns cluster nodes information. +Get node information. +By default, the API returns all attributes and core settings for cluster nodes. {ref}/cluster-nodes-info.html[Endpoint documentation] [source,ts] @@ -8497,7 +9152,15 @@ client.nodes.info({ ... }) [discrete] ==== reload_secure_settings -Reloads the keystore on nodes in the cluster. +Reload the keystore on nodes in the cluster. + +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. 
+ +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. {ref}/secure-settings.html[Endpoint documentation] [source,ts] @@ -8516,7 +9179,9 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== stats -Returns cluster nodes statistics. +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. {ref}/cluster-nodes-stats.html[Endpoint documentation] [source,ts] @@ -8544,7 +9209,7 @@ client.nodes.stats({ ... }) [discrete] ==== usage -Returns information on the usage of features. +Get feature usage information. {ref}/cluster-nodes-usage.html[Endpoint documentation] [source,ts] @@ -8712,7 +9377,30 @@ client.queryRules.test({ ruleset_id, match_criteria }) === rollup [discrete] ==== delete_job -Deletes an existing rollup job. +Delete a rollup job. + +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. + +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. +If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: + +``` +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +``` {ref}/rollup-delete-job.html[Endpoint documentation] [source,ts] @@ -8728,7 +9416,12 @@ client.rollup.deleteJob({ id }) [discrete] ==== get_jobs -Retrieves the configuration, stats, and status of rollup jobs. +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. + +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. {ref}/rollup-get-job.html[Endpoint documentation] [source,ts] @@ -8745,7 +9438,15 @@ If it is `_all` or omitted, the API returns all rollup jobs. [discrete] ==== get_rollup_caps -Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. +Get the rollup job capabilities. +Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. 
+
+This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index.
+Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.
+This API enables you to inspect an index and determine:
+
+1. Does this index have associated rollup data somewhere in the cluster?
+2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?

{ref}/rollup-get-rollup-caps.html[Endpoint documentation]
[source,ts]
@@ -8762,7 +9463,12 @@ client.rollup.getRollupCaps({ ... })

[discrete]
==== get_rollup_index_caps
-Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored).
+Get the rollup index capabilities.
+Get the rollup capabilities of all jobs inside a rollup index.
+A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:
+
+* What jobs are stored in an index (or indices specified via a pattern)?
+* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?

{ref}/rollup-get-rollup-index-caps.html[Endpoint documentation]
[source,ts]
@@ -8779,7 +9485,15 @@ Wildcard (`*`) expressions are supported.

[discrete]
==== put_job
-Creates a rollup job.
+Create a rollup job.
+
+WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.
+
+The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.
+
+There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.
+
+Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.

{ref}/rollup-put-job.html[Endpoint documentation]
[source,ts]
@@ -8820,7 +9534,9 @@ on a per-field basis and for each field you configure which metric should be col

[discrete]
==== rollup_search
-Enables searching rolled-up data using the standard Query DSL.
+Search rolled-up data.
+The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.
+It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.

{ref}/rollup-search.html[Endpoint documentation]
[source,ts]
@@ -8841,7 +9557,9 @@ client.rollup.rollupSearch({ index })

[discrete]
==== start_job
-Starts an existing, stopped rollup job.
+Start rollup jobs.
+If you try to start a job that does not exist, an exception occurs.
+If you try to start a job that is already started, nothing happens.

{ref}/rollup-start-job.html[Endpoint documentation]
[source,ts]
@@ -8857,7 +9575,9 @@ client.rollup.startJob({ id })

[discrete]
==== stop_job
-Stops an existing, started rollup job.
+Stop rollup jobs.
+If you try to stop a job that does not exist, an exception occurs.
+If you try to stop a job that is already stopped, nothing happens.

{ref}/rollup-stop-job.html[Endpoint documentation]
[source,ts]
@@ -8945,7 +9665,8 @@ client.searchApplication.getBehavioralAnalytics({ ... })

[discrete]
==== list
-Returns the existing search applications.
+Get search applications.
+Get information about search applications.

{ref}/list-search-applications.html[Endpoint documentation]
[source,ts]
@@ -9008,14 +9729,25 @@ client.searchApplication.putBehavioralAnalytics({ name })

[discrete]
==== render_query
-Renders a query for given search application search parameters
+Render a search application query.
+Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
+If a parameter used in the search template is not specified in `params`, the parameter's default value will be used.
+The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.
+
+You must have `read` privileges on the backing alias of the search application.

{ref}/search-application-render-query.html[Endpoint documentation]
[source,ts]
----
-client.searchApplication.renderQuery()
+client.searchApplication.renderQuery({ name })
----

+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the search application to render the query for.
+** *`params` (Optional, Record)*

[discrete]
==== search
@@ -9041,7 +9773,8 @@ client.searchApplication.search({ name })

=== searchable_snapshots
[discrete]
==== cache_stats
-Retrieve node-level cache statistics about searchable snapshots.
+Get cache statistics.
+Get statistics about the shared cache for partially mounted indices.

{ref}/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
@@ -9058,7 +9791,8 @@ client.searchableSnapshots.cacheStats({ ... })

[discrete]
==== clear_cache
-Clear the cache of searchable snapshots.
+Clear the cache.
+Clear indices and data streams from the shared cache for partially mounted indices.

{ref}/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
@@ -9079,7 +9813,10 @@ client.searchableSnapshots.clearCache({ ... })

[discrete]
==== mount
-Mount a snapshot as a searchable index.
+Mount a snapshot.
+Mount a snapshot as a searchable snapshot index.
+Do not use this API for snapshots managed by index lifecycle management (ILM).
+Manually mounting ILM-managed snapshots can interfere with ILM processes.

{ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation]
[source,ts]
@@ -9103,7 +9840,7 @@ client.searchableSnapshots.mount({ repository, snapshot, index })

[discrete]
==== stats
-Retrieve shard-level statistics about searchable snapshots.
+Get searchable snapshot statistics.

{ref}/searchable-snapshots-apis.html[Endpoint documentation]
[source,ts]
@@ -10497,7 +11234,15 @@ visible to search, if 'false' do nothing with refreshes.

=== shutdown
[discrete]
==== delete_node
-Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
+Cancel node shutdown preparations.
+Remove a node from the shutdown list so it can resume normal operations.
+You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
+Shutdown requests are never removed automatically by Elasticsearch.
+ +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. +Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10515,7 +11260,14 @@ client.shutdown.deleteNode({ node_id }) [discrete] ==== get_node -Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Get the shutdown status. + +Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. +The API returns status information for each part of the shut down process. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10533,7 +11285,20 @@ client.shutdown.getNode({ ... }) [discrete] ==== put_node -Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +Prepare a node to be shut down. + +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. + +The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. +This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. + +You must specify the type of shutdown: `restart`, `remove`, or `replace`. +If a node is already being prepared for shutdown, you can use this API to change the shutdown type. + +IMPORTANT: This API does NOT terminate the Elasticsearch process. +Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] [source,ts] @@ -10583,7 +11348,9 @@ client.simulate.ingest() === slm [discrete] ==== delete_lifecycle -Deletes an existing snapshot lifecycle policy. +Delete a policy. +Delete a snapshot lifecycle policy definition. +This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. {ref}/slm-api-delete-policy.html[Endpoint documentation] [source,ts] @@ -10599,7 +11366,9 @@ client.slm.deleteLifecycle({ policy_id }) [discrete] ==== execute_lifecycle -Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. {ref}/slm-api-execute-lifecycle.html[Endpoint documentation] [source,ts] @@ -10615,7 +11384,9 @@ client.slm.executeLifecycle({ policy_id }) [discrete] ==== execute_retention -Deletes any snapshots that are expired according to the policy's retention rules. +Run a retention policy. 
+Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule. {ref}/slm-api-execute-retention.html[Endpoint documentation] [source,ts] @@ -10626,7 +11397,8 @@ client.slm.executeRetention() [discrete] ==== get_lifecycle -Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. {ref}/slm-api-get-policy.html[Endpoint documentation] [source,ts] @@ -10642,7 +11414,8 @@ client.slm.getLifecycle({ ... }) [discrete] ==== get_stats -Returns global and policy-level statistics about actions taken by snapshot lifecycle management. +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. {ref}/slm-api-get-stats.html[Endpoint documentation] [source,ts] @@ -10653,7 +11426,7 @@ client.slm.getStats() [discrete] ==== get_status -Retrieves the status of snapshot lifecycle management (SLM). +Get the snapshot lifecycle management status. {ref}/slm-api-get-status.html[Endpoint documentation] [source,ts] @@ -10664,7 +11437,10 @@ client.slm.getStatus() [discrete] ==== put_lifecycle -Creates or updates a snapshot lifecycle policy. +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. +Only the latest version of a policy is stored. {ref}/slm-api-put-policy.html[Endpoint documentation] [source,ts] @@ -10676,7 +11452,7 @@ client.slm.putLifecycle({ policy_id }) ==== Arguments * *Request (object):* -** *`policy_id` (string)*: ID for the snapshot lifecycle policy you want to create or update. +** *`policy_id` (string)*: The identifier for the snapshot lifecycle policy you want to create or update. ** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. ** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. ** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. @@ -10687,7 +11463,9 @@ client.slm.putLifecycle({ policy_id }) [discrete] ==== start -Turns on snapshot lifecycle management (SLM). +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. {ref}/slm-api-start.html[Endpoint documentation] [source,ts] @@ -10698,7 +11476,14 @@ client.slm.start() [discrete] ==== stop -Turns off snapshot lifecycle management (SLM). +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. 
+You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
+
+The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped.
+Use the get snapshot lifecycle management status API to see if SLM is running.

{ref}/slm-api-stop.html[Endpoint documentation]
[source,ts]
@@ -10711,7 +11496,8 @@ client.slm.stop()

=== snapshot
[discrete]
==== cleanup_repository
-Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots.
+Clean up the snapshot repository.
+Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

{ref}/clean-up-snapshot-repo-api.html[Endpoint documentation]
[source,ts]
@@ -10729,7 +11515,8 @@ client.snapshot.cleanupRepository({ repository })

[discrete]
==== clone
-Clones indices from one snapshot into another snapshot in the same repository.
+Clone a snapshot.
+Clone part or all of a snapshot into another snapshot in the same repository.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10750,7 +11537,8 @@ client.snapshot.clone({ repository, snapshot, target_snapshot, indices })

[discrete]
==== create
-Creates a snapshot in a repository.
+Create a snapshot.
+Take a snapshot of a cluster or of data streams and indices.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10775,7 +11563,10 @@ client.snapshot.create({ repository, snapshot })

[discrete]
==== create_repository
-Creates a repository.
+Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
+To register a snapshot repository, the cluster's global metadata must be writeable.
+Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10794,7 +11585,7 @@ client.snapshot.createRepository({ repository })

[discrete]
==== delete
-Deletes one or more snapshots.
+Delete snapshots.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10812,7 +11603,9 @@ client.snapshot.delete({ repository, snapshot })

[discrete]
==== delete_repository
-Deletes a repository.
+Delete snapshot repositories.
+When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
+The snapshots themselves are left untouched and in place.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10830,7 +11623,7 @@ client.snapshot.deleteRepository({ repository })

[discrete]
==== get
-Returns information about a snapshot.
+Get snapshot information.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10862,7 +11655,7 @@ client.snapshot.get({ repository, snapshot })

[discrete]
==== get_repository
-Returns information about a repository.
+Get snapshot repository information.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10891,7 +11684,24 @@ client.snapshot.repositoryAnalyze()

[discrete]
==== restore
-Restores a snapshot.
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
+
+You can restore a snapshot only to a running cluster with an elected master node.
+The snapshot repository must be registered and available to the cluster.
+The snapshot and cluster versions must be compatible.
+
+To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.
+
+Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:
+
+```
+GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+```
+
+If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.
+
+If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10920,7 +11730,17 @@ client.snapshot.restore({ repository, snapshot })

[discrete]
==== status
-Returns information about the status of a snapshot.
+Get the snapshot status.
+Get a detailed description of the current state for each shard participating in the snapshot.
+Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
+If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
+
+WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
+The API requires a read from the repository for each shard in each snapshot.
+For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
+
+Depending on the latency of your storage, such requests can take an extremely long time to return results.
+These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -10939,7 +11759,8 @@ client.snapshot.status({ ... })

[discrete]
==== verify_repository
-Verifies a repository.
+Verify a snapshot repository.
+Check for common misconfigurations in a snapshot repository.

{ref}/modules-snapshots.html[Endpoint documentation]
[source,ts]
@@ -11249,7 +12070,14 @@ client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })

=== tasks
[discrete]
==== cancel
-Cancels a task, if it can be cancelled through an API.
+Cancel a task.
+A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.
+It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.
+The get task information API will continue to list these cancelled tasks until they complete.
+The `cancelled` flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.
+
+To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.
+You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
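+
+A sketch of the cancel-and-verify flow (assuming a configured `client`; the task ID is a placeholder):
+
+[source,ts]
+----
+// Request cancellation; this returns once the command is accepted,
+// not when the task has actually stopped.
+const taskId = "oTUltX4IQMOUUVeiohTt8A:12345";
+const cancelResponse = await client.tasks.cancel({ task_id: taskId });
+console.log(cancelResponse);
+
+// Poll the get task information API to confirm the task has completed.
+const taskInfo = await client.tasks.get({ task_id: taskId });
+console.log(taskInfo.completed);
+----
+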
{ref}/tasks.html[Endpoint documentation]
[source,ts]
@@ -11270,7 +12098,7 @@ client.tasks.cancel({ ... })

[discrete]
==== get
Get task information.
-Returns information about the tasks currently executing in the cluster.
+Get information about a task currently running in the cluster.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
@@ -11289,7 +12117,8 @@ If no response is received before the timeout expires, the request fails and ret

[discrete]
==== list
-The task management API returns information about tasks currently executing on one or more nodes in the cluster.
+Get all tasks.
+Get information about the tasks currently running on one or more nodes in the cluster.

{ref}/tasks.html[Endpoint documentation]
[source,ts]
@@ -11303,6 +12132,7 @@ client.tasks.list({ ... })

==== Arguments

* *Request (object):*
** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request.
** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run.
** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response.
** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`.
@@ -11314,29 +12144,222 @@ client.tasks.list({ ... })

=== text_structure
[discrete]
==== find_field_structure
-Finds the structure of a text field in an index.
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.

{ref}/find-field-structure.html[Endpoint documentation]
[source,ts]
----
-client.textStructure.findFieldStructure()
----
+client.textStructure.findFieldStructure({ field, index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`field` (string)*: The field that should be analyzed.
+** *`index` (string)*: The name of the index that contains the analyzed field.
+** *`column_names` (Optional, string)*: If `format` is set to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+** *`documents_to_sample` (Optional, number)*: The number of documents to include in the structural analysis.
+The minimum value is 2.
+** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. +The intention in that situation is that a user who knows the meanings will rename the fields before using them. +** *`explain` (Optional, boolean)*: If true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is false. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. 
+Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). +Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. [discrete] ==== find_message_structure -Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. +Find the structure of text messages. +Find the structure of a list of text messages. +The messages must contain data that is suitable to be ingested into Elasticsearch. + +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. {ref}/find-message-structure.html[Endpoint documentation] [source,ts] ---- -client.textStructure.findMessageStructure() ---- - +client.textStructure.findMessageStructure({ messages }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`messages` (string[])*: The list of messages you want to analyze. +** *`column_names` (Optional, string)*: If the format is `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If the format is `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them. +** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is false. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). +Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. [discrete] ==== find_structure -Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. +Find the structure of a text file. +The text file must contain data that is suitable to be ingested into Elasticsearch. + +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. +It must, however, be text; binary text formats are not currently supported. +The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. + +The response from the API contains: + +* A couple of messages from the beginning of the text. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. {ref}/find-structure.html[Endpoint documentation] [source,ts] @@ -11350,10 +12373,11 @@ client.textStructure.findStructure({ ...
}) * *Request (object):* ** *`text_files` (Optional, TJsonDocument[])* ** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set. -** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", etc. +** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. ** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. ** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). ** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. +If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. ** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. ** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern. ** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. @@ -11361,13 +12385,15 @@ client.textStructure.findStructure({ ... }) ** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text.
The minimum is 2; If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. ** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. ** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. -** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis make take. If the analysis is still running when the timeout expires then it will be aborted. +** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. ** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file ** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. [discrete] ==== test_grok_pattern -Tests a Grok pattern on some text. +Test a Grok pattern. +Test a Grok pattern on one or more lines of text. +The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. {ref}/test-grok-pattern.html[Endpoint documentation] [source,ts] @@ -11733,12 +12759,20 @@ timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms -Upgrades all transforms. -This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It -also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not -affect the source and destination indices. The upgrade also does not affect the roles that transforms use when -Elasticsearch security features are enabled; the role used to read source data and write to the destination index -remains unchanged. +Upgrade all transforms. +Transforms are compatible across minor versions and between supported major versions. +However, over time, the format of transform configuration information may change. +This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. +It also cleans up the internal data structures that store the transform state and checkpoints. +The upgrade does not affect the source and destination indices. +The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + +If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. +Resolve the issue, then run the process again. +A summary is returned when the upgrade is finished.
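+
+As a minimal sketch of calling this API from the client (the dry-run-first flow is illustrative, not part of this changeset):
+
+[source,ts]
+----
+// Preview which transforms would be upgraded, without changing anything.
+const check = await client.transform.upgradeTransforms({ dry_run: true });
+console.log(check);
+
+// Apply the upgrade for real.
+const result = await client.transform.upgradeTransforms({ dry_run: false });
+console.log(result);
+----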
+ +To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster. +You may want to perform a recent cluster backup prior to the upgrade. {ref}/upgrade-transforms.html[Endpoint documentation] [source,ts] @@ -11758,7 +12792,13 @@ returns an error. === watcher [discrete] ==== ack_watch -Acknowledges a watch, manually throttling the execution of the watch's actions. +Acknowledge a watch. +Acknowledging a watch enables you to manually throttle the execution of the watch's actions. + +The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure. + +IMPORTANT: If the specified watch is currently being executed, this API will return an error. +The reason for this behavior is to prevent overwriting the watch status from a watch execution. {ref}/watcher-api-ack-watch.html[Endpoint documentation] [source,ts] @@ -11775,7 +12815,8 @@ client.watcher.ackWatch({ watch_id }) [discrete] ==== activate_watch -Activates a currently inactive watch. +Activate a watch. +A watch can be either active or inactive. {ref}/watcher-api-activate-watch.html[Endpoint documentation] [source,ts] @@ -11791,7 +12832,8 @@ client.watcher.activateWatch({ watch_id }) [discrete] ==== deactivate_watch -Deactivates a currently active watch. +Deactivate a watch. +A watch can be either active or inactive. {ref}/watcher-api-deactivate-watch.html[Endpoint documentation] [source,ts] @@ -11807,7 +12849,14 @@ client.watcher.deactivateWatch({ watch_id }) [discrete] ==== delete_watch -Removes a watch from Watcher. +Delete a watch. +When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. + +Deleting a watch does not delete any watch execution records related to this watch from the watch history. + +IMPORTANT: Deleting a watch must be done by using only this API. +Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. +When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. {ref}/watcher-api-delete-watch.html[Endpoint documentation] [source,ts] @@ -11823,8 +12872,15 @@ client.watcher.deleteWatch({ id }) [discrete] ==== execute_watch +Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. -For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. + +For testing and debugging purposes, you also have fine-grained control on how the watch runs. +You can run the watch without running all of its actions or alternatively by simulating them. +You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. + +You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. +This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
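+
+As a minimal sketch, a watch definition can be passed inline and its action simulated (the watch body and action name here are illustrative):
+
+[source,ts]
+----
+// Run an unregistered watch by supplying its definition inline.
+// The action runs in simulate mode and no record is written to the watch history.
+const response = await client.watcher.executeWatch({
+  record_execution: false,
+  action_modes: { log_hits: "simulate" },
+  watch: {
+    trigger: { schedule: { interval: "10s" } },
+    input: { simple: { status: "test" } },
+    condition: { always: {} },
+    actions: { log_hits: { logging: { text: "watch was run" } } },
+  },
+});
+console.log(response);
+----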
{ref}/watcher-api-execute-watch.html[Endpoint documentation] [source,ts] @@ -11859,7 +12915,7 @@ client.watcher.getSettings() [discrete] ==== get_watch -Retrieves a watch by its ID. +Get a watch. {ref}/watcher-api-get-watch.html[Endpoint documentation] [source,ts] @@ -11875,7 +12931,18 @@ client.watcher.getWatch({ id }) [discrete] ==== put_watch -Creates a new watch, or updates an existing one. +Create or update a watch. +When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. +Typically for the `schedule` trigger, the scheduler is the trigger engine. + +IMPORTANT: You must use Kibana or this API to create a watch. +Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. +If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. + +When you add a watch you can also define its initial active state by setting the *active* parameter. + +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. +If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. {ref}/watcher-api-put-watch.html[Endpoint documentation] [source,ts] @@ -11902,7 +12969,8 @@ client.watcher.putWatch({ id }) [discrete] ==== query_watches -Retrieves stored watches. +Query watches. +Get all registered watches in a paginated manner and optionally filter watches by a query. {ref}/watcher-api-query-watches.html[Endpoint documentation] [source,ts] @@ -11922,7 +12990,8 @@ client.watcher.queryWatches({ ... }) [discrete] ==== start -Starts Watcher if it is not already running. +Start the watch service. +Start the Watcher service if it is not already running. {ref}/watcher-api-start.html[Endpoint documentation] [source,ts] @@ -11933,7 +13002,7 @@ client.watcher.start() [discrete] ==== stats -Retrieves the current Watcher metrics. +Get Watcher statistics. {ref}/watcher-api-stats.html[Endpoint documentation] [source,ts] @@ -11950,7 +13019,8 @@ client.watcher.stats({ ... }) [discrete] ==== stop -Stops Watcher if it is running. +Stop the watch service. +Stop the Watcher service if it is running. {ref}/watcher-api-stop.html[Endpoint documentation] [source,ts] @@ -11974,7 +13044,12 @@ client.watcher.updateSettings() === xpack [discrete] ==== info -Provides general information about the installed X-Pack features. +Get information. +The information provided by the API includes: + +* Build information including the build number and timestamp. +* License information about the currently installed license. +* Feature information for the features that are currently enabled and available under the current license. {ref}/info-api.html[Endpoint documentation] [source,ts] @@ -11992,7 +13067,9 @@ client.xpack.info({ ... }) [discrete] ==== usage -This API provides information about which features are currently enabled and available under the current license and some usage statistics. +Get usage information. +Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. 
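+
+A minimal sketch (the feature names in the comment are illustrative):
+
+[source,ts]
+----
+// List the features that are enabled, with their usage statistics.
+const usage = await client.xpack.usage();
+console.log(Object.keys(usage)); // for example: security, watcher, transform
+----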
{ref}/usage-api.html[Endpoint documentation] [source,ts] diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index f1521cf50..67919eac1 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -45,7 +45,7 @@ export default class Ccr { } /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Ccr { } /** - * Creates a new follower index configured to follow the referenced leader index. + * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-follow.html | Elasticsearch API documentation} */ async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -121,7 +121,7 @@ export default class Ccr { } /** - * Retrieves information about all follower indices, including parameters and status for each follower index + * Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-info.html | Elasticsearch API documentation} */ async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -153,7 +153,7 @@ export default class Ccr { } /** - * Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-stats.html | Elasticsearch API documentation} */ async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -185,7 +185,7 @@ export default class Ccr { } /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. 
While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-forget-follower.html | Elasticsearch API documentation} */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -229,7 +229,7 @@ export default class Ccr { } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -269,7 +269,7 @@ export default class Ccr { } /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -301,7 +301,7 @@ export default class Ccr { } /** - * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-pause-follow.html | Elasticsearch API documentation} */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -333,7 +333,7 @@ export default class Ccr { } /** - * Creates a new named collection of auto-follow patterns against a specified remote cluster. 
Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -377,7 +377,7 @@ export default class Ccr { } /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -409,7 +409,7 @@ export default class Ccr { } /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-resume-follow.html | Elasticsearch API documentation} */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -453,7 +453,7 @@ export default class Ccr { } /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -483,7 +483,7 @@ export default class Ccr { } /** - * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. 
The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-unfollow.html | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index d4d523926..d3c805129 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -45,7 +45,7 @@ export default class Cluster { } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-allocation-explain.html | Elasticsearch API documentation} */ async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -119,7 +119,7 @@ export default class Cluster { } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html | Elasticsearch API documentation} */ async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -221,7 +221,7 @@ export default class Cluster { } /** - * Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. + * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-get-settings.html | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -251,7 +251,7 @@ export default class Cluster { } /** - * The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. 
The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-health.html | Elasticsearch API documentation} */ async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -323,7 +323,7 @@ export default class Cluster { } /** - * Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-pending.html | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -353,7 +353,7 @@ export default class Cluster { } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list.
It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html | Elasticsearch API documentation} */ async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -427,7 +427,7 @@ export default class Cluster { } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-update-settings.html | Elasticsearch API documentation} */ async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -469,7 +469,7 @@ export default class Cluster { } /** - * The cluster remote info API allows you to retrieve all of the configured remote cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias. + * Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-remote-info.html | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -499,7 +499,7 @@ export default class Cluster { } /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-reroute.html | Elasticsearch API documentation} */ async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -541,7 +541,7 @@ export default class Cluster { } /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. 
The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-state.html | Elasticsearch API documentation} */ async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -585,7 +585,7 @@ export default class Cluster { } /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 6e2e4de9a..4ac485811 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -462,22 +462,22 @@ export default class Connector { } /** - * Checks in a connector sync job (refreshes 'last_seen'). + * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/check-in-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async syncJobCheckIn (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -494,22 +494,34 @@ export default class Connector { } /** - * Claims a connector sync job. + * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/claim-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async syncJobClaim (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -558,22 +570,34 @@ export default class Connector { } /** - * Sets an error for a connector sync job. + * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-error-api.html | Elasticsearch API documentation} */ - async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async syncJobError (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['connector_sync_job_id'] + const acceptedBody: string[] = ['error'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -889,22 +913,34 @@ export default class Connector { } /** - * Updates the connector features in the connector document. + * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
@@ -889,22 +913,34 @@ export default class Connector {
   }
 
   /**
-   * Updates the connector features in the connector document.
+   * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-features-api.html | Elasticsearch API documentation}
    */
-  async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async updateFeatures (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorUpdateFeaturesResponse>
+  async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorUpdateFeaturesResponse, unknown>>
+  async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<T.ConnectorUpdateFeaturesResponse>
+  async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['connector_id']
+    const acceptedBody: string[] = ['features']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    const userBody: any = params?.body
+    let body: Record<string, any> | string
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = userBody != null ? { ...userBody } : undefined
+    }
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
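A minimal sketch of overriding one connector feature; the connector ID and the shape of the `features` payload are illustrative assumptions:

[source,js]
----
// Hypothetical connector ID; `features` is the body field accepted above
const response = await client.connector.updateFeatures({
  connector_id: "my-connector",
  features: {
    document_level_security: { enabled: true },
  },
});
console.log(response);
----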
diff --git a/src/api/api/features.ts b/src/api/api/features.ts
index 5bfe953dc..52a32a4f9 100644
--- a/src/api/api/features.ts
+++ b/src/api/api/features.ts
@@ -45,7 +45,7 @@ export default class Features {
   }
 
   /**
-   * Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot
+   * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-features-api.html | Elasticsearch API documentation}
    */
   async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesGetFeaturesResponse>
@@ -75,7 +75,7 @@ export default class Features {
   }
 
   /**
-   * Resets the internal state of features, usually by deleting system indices
+   * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation}
    */
   async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FeaturesResetFeaturesResponse>
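A minimal sketch of listing snapshottable feature states with the documented method, assuming a configured `client`:

[source,js]
----
const response = await client.features.getFeatures();
// Each entry names a feature whose state can be included in snapshots
console.log(response.features);
----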
diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts
index 7094a1e4b..8e5041fe7 100644
--- a/src/api/api/health_report.ts
+++ b/src/api/api/health_report.ts
@@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey'
 interface That { transport: Transport }
 
 /**
- * Returns the health of the cluster.
+ * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set `verbose` to `false` to disable the more expensive analysis logic.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/health-api.html | Elasticsearch API documentation}
  */
 export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.HealthReportResponse>
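As the note above suggests, automated polling should disable the expensive root-cause analysis. A minimal sketch, assuming a configured `client`:

[source,js]
----
// Poll cluster health without the costly root-cause analysis
const response = await client.healthReport({ verbose: false });
console.log(response.status);
----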
diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts
index d570a95ab..dc1f6dfb4 100644
--- a/src/api/api/ilm.ts
+++ b/src/api/api/ilm.ts
@@ -45,7 +45,7 @@ export default class Ilm {
   }
 
   /**
-   * Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+   * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-delete-lifecycle.html | Elasticsearch API documentation}
    */
   async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmDeleteLifecycleResponse>
@@ -77,7 +77,7 @@ export default class Ilm {
   }
 
   /**
-   * Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures.
+   * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-explain-lifecycle.html | Elasticsearch API documentation}
    */
   async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmExplainLifecycleResponse>
@@ -109,7 +109,7 @@ export default class Ilm {
   }
 
   /**
-   * Retrieves a lifecycle policy.
+   * Get lifecycle policies.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-lifecycle.html | Elasticsearch API documentation}
    */
   async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetLifecycleResponse>
@@ -149,7 +149,7 @@ export default class Ilm {
   }
 
   /**
-   * Retrieves the current index lifecycle management (ILM) status.
+   * Get the ILM status. Get the current index lifecycle management status.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-status.html | Elasticsearch API documentation}
    */
   async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmGetStatusResponse>
@@ -179,7 +179,7 @@ export default class Ilm {
   }
 
   /**
-   * Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers.
+   * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 2. Remove custom allocation settings from existing ILM policies. 3. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation}
    */
   async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse>
@@ -221,7 +221,7 @@ export default class Ilm {
   }
 
   /**
-   * Manually moves an index into the specified step and executes that step.
+   * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-move-to-step.html | Elasticsearch API documentation}
    */
   async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse>
@@ -265,7 +265,7 @@ export default class Ilm {
   }
 
   /**
-   * Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented.
+   * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-put-lifecycle.html | Elasticsearch API documentation}
    */
   async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse>
@@ -309,7 +309,7 @@ export default class Ilm {
   }
 
   /**
-   * Removes the assigned lifecycle policy and stops managing the specified index
+   * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-remove-policy.html | Elasticsearch API documentation}
    */
   async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse>
@@ -341,7 +341,7 @@ export default class Ilm {
   }
 
   /**
-   * Retries executing the policy for an index that is in the ERROR step.
+   * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-retry-policy.html | Elasticsearch API documentation}
    */
   async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse>
@@ -373,7 +373,7 @@ export default class Ilm {
   }
 
   /**
-   * Start the index lifecycle management (ILM) plugin.
+   * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-start.html | Elasticsearch API documentation}
    */
   async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse>
@@ -403,7 +403,7 @@ export default class Ilm {
   }
 
   /**
-   * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
+   * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-stop.html | Elasticsearch API documentation}
    */
   async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse>
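A minimal sketch of the move-to-step call described above; the index name and step values are illustrative and must match the index's actual lifecycle state:

[source,js]
----
// WARNING: expert-level API; current_step must match the step that is
// actually running for the index
const response = await client.ilm.moveToStep({
  index: "my-index-000001",
  current_step: { phase: "new", action: "complete", name: "complete" },
  next_step: { phase: "warm" },
});
console.log(response);
----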
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 1d8cdd5c0..34f2214e0 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -130,7 +130,7 @@ export default class Indices {
   }
 
   /**
-   * Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices.
+   * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clearcache.html | Elasticsearch API documentation}
    */
   async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse>
@@ -170,7 +170,7 @@ export default class Indices {
   }
 
   /**
-   * Clones an existing index.
+   * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clone-index.html | Elasticsearch API documentation}
    */
   async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse>
@@ -215,7 +215,7 @@ export default class Indices {
   }
 
   /**
-   * Closes an index.
+   * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-close.html | Elasticsearch API documentation}
    */
   async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse>
@@ -563,7 +563,7 @@ export default class Indices {
   }
 
   /**
-   * Analyzes the disk usage of each field of an index or data stream.
+   * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-disk-usage.html | Elasticsearch API documentation}
    */
   async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDiskUsageResponse>
@@ -595,7 +595,7 @@ export default class Indices {
   }
 
   /**
-   * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.
+   * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`).
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-downsample-data-stream.html | Elasticsearch API documentation}
    */
   async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDownsampleResponse>
@@ -769,7 +769,7 @@ export default class Indices {
   }
 
   /**
-   * Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
+   * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-explain-lifecycle.html | Elasticsearch API documentation}
    */
   async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesExplainDataLifecycleResponse>
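A minimal sketch of the close API described above, with an illustrative index name:

[source,js]
----
const response = await client.indices.close({ index: "my-index-000001" });
console.log(response.acknowledged);
----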
@@ -801,7 +801,7 @@ export default class Indices {
   }
 
   /**
-   * Returns field usage information for each shard and field of an index.
+   * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html | Elasticsearch API documentation}
    */
   async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFieldUsageStatsResponse>
@@ -833,7 +833,7 @@ export default class Indices {
   }
 
   /**
-   * Flushes one or more data streams or indices.
+   * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-flush.html | Elasticsearch API documentation}
    */
   async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse>
@@ -873,7 +873,7 @@ export default class Indices {
   }
 
   /**
-   * Performs the force merge operation on one or more indices.
+   * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html | Elasticsearch API documentation}
    */
   async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse>
@@ -1376,7 +1376,7 @@ export default class Indices {
   }
 
   /**
-   * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream
+   * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html | Elasticsearch API documentation}
    */
   async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPromoteDataStreamResponse>
@@ -1636,7 +1636,7 @@ export default class Indices {
   }
 
   /**
-   * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+   * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html | Elasticsearch API documentation}
    */
   async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>
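Following the warning above, a minimal sketch of force merging a read-only index down to one segment (index name illustrative):

[source,js]
----
const response = await client.indices.forcemerge({
  index: "my-index-000001",
  max_num_segments: 1,
});
console.log(response);
----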
@@ -1680,7 +1680,7 @@ export default class Indices {
   }
 
   /**
-   * Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices.
+   * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node, the information about the original recovery will not be shown in the recovery API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html | Elasticsearch API documentation}
    */
   async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRecoveryResponse>
@@ -1760,7 +1760,7 @@ export default class Indices {
   }
 
   /**
-   * Reloads an index's search analyzers and their resources.
+   * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html | Elasticsearch API documentation}
    */
   async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesReloadSearchAnalyzersResponse>
@@ -1792,7 +1792,7 @@ export default class Indices {
   }
 
   /**
-   * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported.
+   * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html | Elasticsearch API documentation}
    */
   async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse>
@@ -1908,7 +1908,7 @@ export default class Indices {
   }
 
   /**
-   * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices.
+   * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html | Elasticsearch API documentation}
    */
   async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
@@ -1948,7 +1948,7 @@ export default class Indices {
   }
 
   /**
-   * Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices.
+   * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html | Elasticsearch API documentation}
    */
   async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShardStoresResponse>
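A minimal sketch of resolving a cross-cluster index expression before searching, with illustrative patterns:

[source,js]
----
const response = await client.indices.resolveCluster({
  name: "my-index*,cluster*:my-index*",
});
console.log(response);
----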
@@ -1988,7 +1988,7 @@ export default class Indices {
   }
 
   /**
-   * Shrinks an existing index into a new index with fewer primary shards.
+   * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shrink-index.html | Elasticsearch API documentation}
    */
   async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShrinkResponse>
@@ -2117,7 +2117,7 @@ export default class Indices {
   }
 
   /**
-   * Splits an existing index into a new index with more primary shards.
+   * Split an index. Split an index into a new index with more primary shards. Before you can split an index: * The index must be read-only. * The cluster health status must be green. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-split-index.html | Elasticsearch API documentation}
    */
   async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse>
@@ -2162,7 +2162,7 @@ export default class Indices {
   }
 
   /**
-   * Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices.
+   * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesStatsResponse>
@@ -2209,7 +2209,7 @@ export default class Indices {
   }
 
   /**
-   * Unfreezes an index.
+   * Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html | Elasticsearch API documentation}
    */
   async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUnfreezeResponse>
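A minimal sketch of the shrink flow described above; the index names are illustrative and the shard-allocation prerequisites are omitted for brevity:

[source,js]
----
// The source index must be read-only before it can be shrunk
await client.indices.putSettings({
  index: "my-source-index",
  settings: { "index.blocks.write": true },
});
const response = await client.indices.shrink({
  index: "my-source-index",
  target: "my-shrunken-index",
  settings: { "index.number_of_shards": 1 },
});
console.log(response);
----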
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index f9c1d3e61..65bf2e0ad 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -181,7 +181,7 @@ export default class Inference {
   }
 
   /**
-   * Create an inference endpoint
+   * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-inference-api.html | Elasticsearch API documentation}
    */
   async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutResponse>
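A minimal sketch of creating an endpoint for a built-in model; the endpoint ID and the service settings below are illustrative assumptions, not a definitive configuration:

[source,js]
----
// Hypothetical endpoint ID; service settings assume the built-in E5 model
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "my-e5-endpoint",
  inference_config: {
    service: "elasticsearch",
    service_settings: {
      model_id: ".multilingual-e5-small",
      num_allocations: 1,
      num_threads: 1,
    },
  },
});
console.log(response);
----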
diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts
index d898302c2..8814df08b 100644
--- a/src/api/api/ingest.ts
+++ b/src/api/api/ingest.ts
@@ -77,22 +77,22 @@ export default class Ingest {
   }
 
   /**
-   * Deletes an ip location database configuration
+   * Deletes an IP location database configuration.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async deleteIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestDeleteIpLocationDatabaseResponse>
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestDeleteIpLocationDatabaseResponse, unknown>>
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestDeleteIpLocationDatabaseResponse>
+  async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
 
-    params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -211,13 +211,13 @@ export default class Ingest {
   }
 
   /**
-   * Returns the specified ip location database configuration
+   * Returns information about one or more IP location database configurations.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestGetIpLocationDatabaseResponse>
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestGetIpLocationDatabaseResponse, unknown>>
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestGetIpLocationDatabaseResponse>
+  async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
     const querystring: Record<string, any> = {}
     const body = undefined
@@ -227,6 +227,7 @@ export default class Ingest {
       if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -364,22 +365,27 @@ export default class Ingest {
   }
 
   /**
-   * Puts the configuration for a ip location database to be downloaded
+   * Create or update an IP location database configuration.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-ip-location-database-api.html | Elasticsearch API documentation}
    */
-  async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async putIpLocationDatabase (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IngestPutIpLocationDatabaseResponse>
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IngestPutIpLocationDatabaseResponse, unknown>>
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<T.IngestPutIpLocationDatabaseResponse>
+  async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise<any> {
     const acceptedPath: string[] = ['id']
+    const acceptedBody: string[] = ['configuration']
     const querystring: Record<string, any> = {}
-    const body = undefined
+    // @ts-expect-error
+    let body: any = params.body ?? undefined
 
-    params = params ?? {}
     for (const key in params) {
-      if (acceptedPath.includes(key)) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body') {
+        // @ts-expect-error
         querystring[key] = params[key]
       }
     }
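A minimal sketch of the put call, using the `id` path and `configuration` body fields accepted above; the database name and account ID are placeholders:

[source,js]
----
const response = await client.ingest.putIpLocationDatabase({
  id: "my-database-1",
  configuration: {
    name: "GeoIP2-City",
    maxmind: { account_id: "1234567" },
  },
});
console.log(response);
----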
If the operator privileges feature is enabled, only operator users can use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-license.html | Elasticsearch API documentation} */ async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -207,7 +207,7 @@ export default class License { } /** - * The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-basic.html | Elasticsearch API documentation} */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -237,7 +237,7 @@ export default class License { } /** - * The start trial API enables you to start a 30-day trial, which gives access to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html | Elasticsearch API documentation} */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 8477f9085..2a9f98b9f 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -45,7 +45,7 @@ export default class Logstash { } /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Logstash { } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. 
Get pipelines that are used for Logstash Central Management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -117,7 +117,7 @@ export default class Logstash { } /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 6e454fa6b..379cb575f 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -45,7 +45,7 @@ export default class Migration { } /** - * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. + * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html | Elasticsearch API documentation} */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -85,7 +85,7 @@ export default class Migration { } /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-feature-upgrade.html | Elasticsearch API documentation} */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -115,7 +115,7 @@ export default class Migration { } /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
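For illustration only (not part of the diff): a minimal sketch of checking the feature migration status and then starting the migration with the JS client, assuming a configured `client` instance:

[source, js]
----
const status = await client.migration.getFeatureUpgradeStatus();
console.log(status);

// Start the automatic migration only if features still need migrating.
if (status.migration_status === "MIGRATION_NEEDED") {
  const response = await client.migration.postFeatureUpgrade();
  console.log(response);
}
----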
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-feature-upgrade.html | Elasticsearch API documentation} */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 73ee0f319..08bac2455 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1953,7 +1953,7 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version'] + const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -1997,7 +1997,7 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body @@ -2084,8 +2084,8 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 
'results_index_name', 'results_retention_days'] const querystring: Record = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 3e11f2be5..c578ae57c 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -45,7 +45,7 @@ export default class Monitoring { } /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send monitoring data. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/monitor-elasticsearch-cluster.html | Elasticsearch API documentation} */ async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 8235710cb..28266d35f 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -45,7 +45,7 @@ export default class Nodes { } /** - * You can use this API to clear the archived repositories metering information in the cluster. + * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html | Elasticsearch API documentation} */ async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -78,7 +78,7 @@ export default class Nodes { } /** - * You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html | Elasticsearch API documentation} */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -110,7 +110,7 @@ export default class Nodes { } /** - * This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. 
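Illustrative sketch (not part of the diff) of calling the hot threads API described above with the JS client; the node identifier is hypothetical:

[source, js]
----
// Resolves to plain text describing the top hot threads per selected node.
const response = await client.nodes.hotThreads({
  node_id: "my-node", // hypothetical node identifier
  threads: 3,
});
console.log(response);
----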
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-hot-threads.html | Elasticsearch API documentation} */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -150,7 +150,7 @@ export default class Nodes { } /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core settings for cluster nodes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-info.html | Elasticsearch API documentation} */ async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -197,7 +197,7 @@ export default class Nodes { } /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -249,7 +249,7 @@ export default class Nodes { } /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -303,7 +303,7 @@ export default class Nodes { } /** - * Returns information on the usage of features. + * Get feature usage information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-usage.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index e18fd9222..5c115c451 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Ping the cluster. Returns whether the cluster is running. + * Ping the cluster. Get information about whether the cluster is running. 
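Illustrative sketch (not part of the diff): in the JS client, `ping` resolves to a boolean rather than a response body:

[source, js]
----
const isRunning = await client.ping();
console.log(isRunning); // true when the cluster responds
----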
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index.html | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index f615d0645..df3aafe6c 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -45,7 +45,7 @@ export default class Rollup { } /** - * Deletes an existing rollup job. + * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Rollup { } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -117,7 +117,7 @@ export default class Rollup { } /** - * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
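Illustrative sketch (not part of the diff); the index pattern is hypothetical:

[source, js]
----
// Inspect which rollup jobs cover the "sensor-*" source index pattern.
const response = await client.rollup.getRollupCaps({
  id: "sensor-*", // hypothetical index pattern
});
console.log(response);
----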
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html | Elasticsearch API documentation} */ async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -157,7 +157,7 @@ export default class Rollup { } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). + * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html | Elasticsearch API documentation} */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -189,7 +189,7 @@ export default class Rollup { } /** - * Creates a rollup job. + * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html | Elasticsearch API documentation} */ async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -233,7 +233,7 @@ export default class Rollup { } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -277,7 +277,7 @@ export default class Rollup { } /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. 
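Illustrative sketch (not part of the diff); the job identifier is hypothetical:

[source, js]
----
// Starting an already started job is a no-op; a missing job raises an error.
const response = await client.rollup.startJob({
  id: "sensor", // hypothetical rollup job id
});
console.log(response);
----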
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html | Elasticsearch API documentation} */ async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -309,7 +309,7 @@ export default class Rollup { } /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index 2e022e933..0316026b0 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -181,7 +181,7 @@ export default class SearchApplication { } /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-search-applications.html | Elasticsearch API documentation} */ async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -313,22 +313,34 @@ export default class SearchApplication { } /** - * Renders a query for given search application search parameters + * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. 
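Illustrative sketch (not part of the diff) matching the new `renderQuery` signature below; the application name and template parameter are hypothetical:

[source, js]
----
const response = await client.searchApplication.renderQuery({
  name: "my-search-app", // hypothetical search application
  params: {
    query_string: "rock climbing", // hypothetical template parameter
  },
});
console.log(response);
----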
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-application-render-query.html | Elasticsearch API documentation} */ - async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async renderQuery (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['name'] + const acceptedBody: string[] = ['params'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 11cb097d9..013ef8607 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -45,7 +45,7 @@ export default class SearchableSnapshots { } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially mounted indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -85,7 +85,7 @@ export default class SearchableSnapshots { } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -125,7 +125,7 @@ export default class SearchableSnapshots { } /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). 
Manually mounting ILM-managed snapshots can interfere with ILM processes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation} */ async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -170,7 +170,7 @@ export default class SearchableSnapshots { } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-apis.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index cf83485f3..c54e4d94c 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -45,7 +45,7 @@ export default class Shutdown { } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Shutdown { } /** - * Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -117,7 +117,7 @@ export default class Shutdown { } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. 
The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/current | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 13c0ebe15..24886c419 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -45,7 +45,7 @@ export default class Slm { } /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-delete-policy.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Slm { } /** - * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-lifecycle.html | Elasticsearch API documentation} */ async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -109,7 +109,7 @@ export default class Slm { } /** - * Deletes any snapshots that are expired according to the policy's retention rules. + * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-retention.html | Elasticsearch API documentation} */ async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -139,7 +139,7 @@ export default class Slm { } /** - * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. 
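Illustrative sketch (not part of the diff); the policy id is hypothetical:

[source, js]
----
// Fetch one policy definition plus its latest snapshot attempt details.
const response = await client.slm.getLifecycle({
  policy_id: "daily-snapshots", // hypothetical policy id
});
console.log(response);
----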
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-policy.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -179,7 +179,7 @@ export default class Slm { } /** - * Returns global and policy-level statistics about actions taken by snapshot lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-stats.html | Elasticsearch API documentation} */ async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -209,7 +209,7 @@ export default class Slm { } /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-status.html | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -239,7 +239,7 @@ export default class Slm { } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-put-policy.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -283,7 +283,7 @@ export default class Slm { } /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-start.html | Elasticsearch API documentation} */ async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -313,7 +313,7 @@ export default class Slm { } /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. 
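Illustrative sketch (not part of the diff) of stopping SLM and then confirming it has stopped, per the description above:

[source, js]
----
const response = await client.slm.stop();
console.log(response); // acknowledged as soon as the request is accepted

// The plugin may keep running until in-progress operations finish.
const status = await client.slm.getStatus();
console.log(status); // e.g. operation_mode: "STOPPING" or "STOPPED"
----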
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-stop.html | Elasticsearch API documentation} */ async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index fa39128ed..a7b8196c0 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -45,7 +45,7 @@ export default class Snapshot { } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} */ async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -77,7 +77,7 @@ export default class Snapshot { } /** - * Clones indices from one snapshot into another snapshot in the same repository. + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -123,7 +123,7 @@ export default class Snapshot { } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -168,7 +168,7 @@ export default class Snapshot { } /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -205,7 +205,7 @@ export default class Snapshot { } /** - * Deletes one or more snapshots. + * Delete snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -238,7 +238,7 @@ export default class Snapshot { } /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
The snapshots themselves are left untouched and in place. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -270,7 +270,7 @@ export default class Snapshot { } /** - * Returns information about a snapshot. + * Get snapshot information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -303,7 +303,7 @@ export default class Snapshot { } /** - * Returns information about a repository. + * Get snapshot repository information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -375,7 +375,7 @@ export default class Snapshot { } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running.
If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -407,7 +407,7 @@ export default class Snapshot { } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -452,7 +452,7 @@ export default class Snapshot { } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
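Illustrative sketch (not part of the diff); repository and snapshot names are hypothetical. Scoping the request to one snapshot avoids the expensive repository-wide reads described above:

[source, js]
----
const response = await client.snapshot.status({
  repository: "my_repository", // hypothetical repository
  snapshot: "snapshot_1",      // hypothetical running snapshot
});
console.log(response);
----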
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -496,7 +496,7 @@ export default class Snapshot { } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index 885facc08..947721b9f 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -45,7 +45,7 @@ export default class Tasks { } /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -85,7 +85,7 @@ export default class Tasks { } /** - * Get task information. Returns information about the tasks currently executing in the cluster. + * Get task information. Get information about a task currently running in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -117,7 +117,7 @@ export default class Tasks { } /** - * The task management API returns information about tasks currently executing on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index f67eb2080..93cd23ceb 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -45,22 +45,22 @@ export default class TextStructure { } /** - * Finds the structure of a text field in an index. + * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. 
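Illustrative sketch (not part of the diff) for the find field structure API described above; the index and field names are hypothetical:

[source, js]
----
const response = await client.textStructure.findFieldStructure({
  index: "my-logs", // hypothetical index
  field: "message", // hypothetical text field to analyze
});
console.log(response);
----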
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-field-structure.html | Elasticsearch API documentation} */ - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async findFieldStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] const querystring: Record = {} const body = undefined - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -74,22 +74,34 @@ export default class TextStructure { } /** - * Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. + * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
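Illustrative sketch (not part of the diff) using the `messages` body parameter added below; the log lines are hypothetical:

[source, js]
----
const response = await client.textStructure.findMessageStructure({
  messages: [
    // hypothetical log lines to analyze
    "[2024-03-05T10:52:36,256][INFO ][o.e.n.Node] initializing ...",
    "[2024-03-05T10:52:41,038][INFO ][o.e.n.Node] initialized",
  ],
});
console.log(response);
----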
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-message-structure.html | Elasticsearch API documentation} */ - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async findMessageStructure (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = [] + const acceptedBody: string[] = ['messages'] const querystring: Record = {} - const body = undefined + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body') { + // @ts-expect-error querystring[key] = params[key] } } @@ -103,7 +115,7 @@ export default class TextStructure { } /** - * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. + * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 MB. The response from the API contains: * A couple of messages from the beginning of the text. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
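Illustrative sketch (not part of the diff), assuming the client's bulk-style `text_files` parameter for this endpoint; the documents are hypothetical:

[source, js]
----
const response = await client.textStructure.findStructure({
  text_files: [
    // hypothetical NDJSON lines to analyze
    { name: "Leviathan Wakes", author: "James S.A. Corey" },
    { name: "Hyperion", author: "Dan Simmons" },
  ],
});
console.log(response);
----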
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-structure.html | Elasticsearch API documentation} */ async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -137,7 +149,7 @@ export default class TextStructure { } /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-grok-pattern.html | Elasticsearch API documentation} */ async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 56029c0ba..b51bfbc12 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -446,7 +446,7 @@ export default class Transform { } /** - * Upgrades all transforms. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + * Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue, then re-run the process. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/upgrade-transforms.html | Elasticsearch API documentation} */ async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 1feeac116..5e5056706 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -45,7 +45,7 @@ export default class Watcher { } /** - * Acknowledges a watch, manually throttling the execution of the watch's actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts
index 1feeac116..5e5056706 100644
--- a/src/api/api/watcher.ts
+++ b/src/api/api/watcher.ts
@@ -45,7 +45,7 @@ export default class Watcher {
   }
 
   /**
-    * Acknowledges a watch, manually throttling the execution of the watch's actions.
+    * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-ack-watch.html | Elasticsearch API documentation}
    */
   async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherAckWatchResponse>
@@ -85,7 +85,7 @@ export default class Watcher {
   }
 
   /**
-    * Activates a currently inactive watch.
+    * Activate a watch. A watch can be either active or inactive.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-activate-watch.html | Elasticsearch API documentation}
    */
   async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherActivateWatchResponse>
@@ -117,7 +117,7 @@ export default class Watcher {
   }
 
   /**
-    * Deactivates a currently active watch.
+    * Deactivate a watch. A watch can be either active or inactive.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-deactivate-watch.html | Elasticsearch API documentation}
    */
   async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeactivateWatchResponse>
@@ -149,7 +149,7 @@ export default class Watcher {
   }
 
   /**
-    * Removes a watch from Watcher.
+    * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-delete-watch.html | Elasticsearch API documentation}
    */
   async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeleteWatchResponse>
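To illustrate the acknowledgement flow just described, a sketch that acknowledges one action of a watch; the watch and action IDs are invented:

[source, js]
----
const response = await client.watcher.ackWatch({
  watch_id: "my_watch",
  action_id: "email_admin",
});
// Acknowledgement state lives under status.actions.<id>.ack.state
console.log(response.status.actions.email_admin.ack.state);
----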
@@ -181,7 +181,7 @@ export default class Watcher {
   }
 
   /**
-    * This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution.
+    * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-execute-watch.html | Elasticsearch API documentation}
    */
   async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherExecuteWatchResponse>
@@ -262,7 +262,7 @@ export default class Watcher {
   }
 
   /**
-    * Retrieves a watch by its ID.
+    * Get a watch.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-watch.html | Elasticsearch API documentation}
    */
   async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetWatchResponse>
@@ -294,7 +294,7 @@ export default class Watcher {
   }
 
   /**
-    * Creates a new watch, or updates an existing one.
+    * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-put-watch.html | Elasticsearch API documentation}
    */
   async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherPutWatchResponse>
@@ -338,7 +338,7 @@ export default class Watcher {
   }
 
   /**
-    * Retrieves stored watches.
+    * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-query-watches.html | Elasticsearch API documentation}
    */
   async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherQueryWatchesResponse>
@@ -380,7 +380,7 @@ export default class Watcher {
   }
 
   /**
-    * Starts Watcher if it is not already running.
+    * Start the watch service. Start the Watcher service if it is not already running.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-start.html | Elasticsearch API documentation}
    */
   async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStartResponse>
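Tying the create and run descriptions together, a hypothetical sketch that registers an inactive watch and then force-executes it once with every action simulated; the watch body is a minimal invented example:

[source, js]
----
// Register the watch in an inactive state
await client.watcher.putWatch({
  id: "my_watch",
  active: false,
  trigger: { schedule: { interval: "10m" } },
  input: { simple: { payload: "test" } },
  condition: { always: {} },
  actions: {
    log_hit: { logging: { text: "watch fired" } },
  },
});

// Force one run, simulating all actions instead of executing them
const response = await client.watcher.executeWatch({
  id: "my_watch",
  action_modes: { _all: "simulate" },
});
console.log(response.watch_record.state);
----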
@@ -410,7 +410,7 @@ export default class Watcher {
   }
 
   /**
-    * Retrieves the current Watcher metrics.
+    * Get Watcher statistics.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stats.html | Elasticsearch API documentation}
    */
   async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStatsResponse>
@@ -450,7 +450,7 @@ export default class Watcher {
   }
 
   /**
-    * Stops Watcher if it is running.
+    * Stop the watch service. Stop the Watcher service if it is running.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stop.html | Elasticsearch API documentation}
    */
   async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStopResponse>
diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts
index 55e3d106a..873750de7 100644
--- a/src/api/api/xpack.ts
+++ b/src/api/api/xpack.ts
@@ -45,7 +45,7 @@ export default class Xpack {
   }
 
   /**
-    * Provides general information about the installed X-Pack features.
+    * Get information. The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/info-api.html | Elasticsearch API documentation}
    */
   async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackInfoResponse>
@@ -75,7 +75,7 @@
   }
 
   /**
-    * This API provides information about which features are currently enabled and available under the current license and some usage statistics.
+    * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.17/usage-api.html | Elasticsearch API documentation} */ async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index 3807a7c69..c87ca4668 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName + list_executed_pipelines?: boolean pipeline?: string refresh?: Refresh routing?: Routing @@ -67,6 +68,7 @@ export interface BulkRequest ex timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean + require_data_stream?: boolean operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } @@ -6849,6 +6851,8 @@ export interface CatAliasesAliasesRecord { export interface CatAliasesRequest extends CatCatRequestBase { name?: Names expand_wildcards?: ExpandWildcards + local?: boolean + master_timeout?: Duration } export type CatAliasesResponse = CatAliasesAliasesRecord[] @@ -6892,6 +6896,8 @@ export interface CatAllocationAllocationRecord { export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds bytes?: Bytes + local?: boolean + master_timeout?: Duration } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -6908,6 +6914,8 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { name?: string + local?: boolean + master_timeout?: Duration } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7011,15 +7019,12 @@ export interface CatHealthRequest extends CatCatRequestBase { export type CatHealthResponse = CatHealthHealthRecord[] -export interface CatHelpHelpRecord { - endpoint: string +export interface CatHelpRequest { } -export interface CatHelpRequest extends CatCatRequestBase { +export interface CatHelpResponse { } -export type CatHelpResponse = CatHelpHelpRecord[] - export interface CatIndicesIndicesRecord { health?: string h?: string @@ -7319,6 +7324,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { include_unloaded_segments?: boolean pri?: boolean time?: TimeUnit + master_timeout?: Duration } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -7333,6 +7339,8 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7387,7 +7395,7 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { bytes?: Bytes h?: CatCatDfaColumns s?: CatCatDfaColumns - time?: Duration + time?: TimeUnit } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -7633,6 +7641,7 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { s?: CatCatTrainedModelsColumns from?: integer size?: integer + time?: TimeUnit } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] @@ -7700,6 +7709,8 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -7978,6 +7989,8 @@ export interface CatNodesRequest extends 
CatCatRequestBase { bytes?: Bytes full_id?: boolean | string include_unloaded_segments?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatNodesResponse = CatNodesNodesRecord[] @@ -7994,6 +8007,9 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8013,6 +8029,9 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends CatCatRequestBase { + include_bootstrap?: boolean + local?: boolean + master_timeout?: Duration } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8080,6 +8099,7 @@ export interface CatRecoveryRequest extends CatCatRequestBase { active_only?: boolean bytes?: Bytes detailed?: boolean + time?: TimeUnit } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -8092,6 +8112,8 @@ export interface CatRepositoriesRepositoriesRecord { } export interface CatRepositoriesRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] @@ -8099,6 +8121,8 @@ export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + local?: boolean + master_timeout?: Duration } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8148,6 +8172,8 @@ export interface CatSegmentsSegmentsRecord { export interface CatShardsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + master_timeout?: Duration + time?: TimeUnit } export type CatShardsResponse = CatShardsShardsRecord[] @@ -8370,6 +8396,8 @@ export interface CatShardsShardsRecord { export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names ignore_unavailable?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -8411,8 +8439,11 @@ export interface CatSnapshotsSnapshotsRecord { export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean - node_id?: string[] + nodes?: string[] parent_task_id?: string + time?: TimeUnit + timeout?: Duration + wait_for_completion?: boolean } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8454,6 +8485,8 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { name?: Name + local?: boolean + master_timeout?: Duration } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8475,6 +8508,8 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names time?: TimeUnit + local?: boolean + master_timeout?: Duration } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -9851,12 +9886,36 @@ export interface ConnectorSyncJobCancelResponse { result: Result } +export interface ConnectorSyncJobCheckInRequest extends RequestBase { + connector_sync_job_id: Id +} + +export interface ConnectorSyncJobCheckInResponse { +} + +export interface ConnectorSyncJobClaimRequest extends RequestBase { + connector_sync_job_id: Id + sync_cursor?: any + worker_hostname: string +} + +export interface ConnectorSyncJobClaimResponse { +} + export interface ConnectorSyncJobDeleteRequest extends RequestBase { connector_sync_job_id: Id } 
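As a usage sketch for the renamed and added `CatTasksRequest` parameters earlier in this hunk (`nodes` replacing `node_id`, plus the new `time` unit); the node names are invented:

[source, js]
----
const response = await client.cat.tasks({
  nodes: ["node-1", "node-2"],
  detailed: true,
  time: "ms",
  format: "json",
});
console.log(response);
----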
export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase +export interface ConnectorSyncJobErrorRequest extends RequestBase { + connector_sync_job_id: Id + error: string +} + +export interface ConnectorSyncJobErrorResponse { +} + export interface ConnectorSyncJobGetRequest extends RequestBase { connector_sync_job_id: Id } @@ -9923,6 +9982,15 @@ export interface ConnectorUpdateErrorResponse { result: Result } +export interface ConnectorUpdateFeaturesRequest extends RequestBase { + connector_id: Id + features: ConnectorConnectorFeatures +} + +export interface ConnectorUpdateFeaturesResponse { + result: Result +} + export interface ConnectorUpdateFilteringRequest extends RequestBase { connector_id: Id filtering?: ConnectorFilteringConfig[] @@ -12653,7 +12721,16 @@ export interface IngestCsvProcessor extends IngestProcessorBase { export interface IngestDatabaseConfiguration { name: Name - maxmind: IngestMaxmind + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo +} + +export interface IngestDatabaseConfigurationFull { + web?: IngestWeb + local?: IngestLocal + name: Name + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { @@ -12805,6 +12882,9 @@ export interface IngestIpLocationProcessor extends IngestProcessorBase { download_database_on_pipeline_creation?: boolean } +export interface IngestIpinfo { +} + export interface IngestJoinProcessor extends IngestProcessorBase { field: Field separator: string @@ -12835,6 +12915,10 @@ export interface IngestKeyValueProcessor extends IngestProcessorBase { value_split: string } +export interface IngestLocal { + type: string +} + export interface IngestLowercaseProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean @@ -13042,6 +13126,9 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase { export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version' +export interface IngestWeb { +} + export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { id: Ids master_timeout?: Duration @@ -13050,6 +13137,14 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase +export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { + id: Ids + master_timeout?: Duration + timeout?: Duration +} + +export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestDeletePipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13100,6 +13195,23 @@ export interface IngestGetGeoipDatabaseResponse { databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[] } +export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { + id: Id + version: VersionNumber + modified_date_millis?: EpochTime + modified_date?: EpochTime + database: IngestDatabaseConfigurationFull +} + +export interface IngestGetIpLocationDatabaseRequest extends RequestBase { + id?: Ids + master_timeout?: Duration +} + +export interface IngestGetIpLocationDatabaseResponse { + databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[] +} + export interface IngestGetPipelineRequest extends RequestBase { id?: Id master_timeout?: Duration @@ -13125,6 +13237,15 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase { export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase +export interface IngestPutIpLocationDatabaseRequest extends 
RequestBase { + id: Id + master_timeout?: Duration + timeout?: Duration + configuration?: IngestDatabaseConfiguration +} + +export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestPutPipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13299,10 +13420,10 @@ export interface LicensePostStartTrialResponse { export interface LogstashPipeline { description: string last_modified: DateTime - pipeline_metadata: LogstashPipelineMetadata - username: string pipeline: string + pipeline_metadata: LogstashPipelineMetadata pipeline_settings: LogstashPipelineSettings + username: string } export interface LogstashPipelineMetadata { @@ -13397,6 +13518,12 @@ export interface MigrationPostFeatureUpgradeResponse { features: MigrationPostFeatureUpgradeMigrationFeature[] } +export interface MlAdaptiveAllocationsSettings { + enabled: boolean + min_number_of_allocations?: integer + max_number_of_allocations?: integer +} + export interface MlAnalysisConfig { bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer @@ -13427,7 +13554,7 @@ export interface MlAnalysisConfigRead { export interface MlAnalysisLimits { categorization_examples_limit?: long - model_memory_limit?: string + model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { @@ -13576,6 +13703,14 @@ export interface MlClassificationInferenceOptions { top_classes_results_field?: string } +export interface MlCommonTokenizationConfig { + do_lower_case?: boolean + max_sequence_length?: integer + span?: integer + truncate?: MlTokenizationTruncate + with_special_tokens?: boolean +} + export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' export type MlCustomSettings = any @@ -13665,15 +13800,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { assignment_explanation?: string datafeed_id: Id - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact state: MlDatafeedState - timing_stats: MlDatafeedTimingStats + timing_stats?: MlDatafeedTimingStats running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { bucket_count: long exponential_average_search_time_per_hour_ms: DurationValue + exponential_average_calculation_context?: MlExponentialAverageCalculationContext job_id: Id search_count: long total_search_time_ms: DurationValue @@ -13865,6 +14001,7 @@ export interface MlDataframeAnalyticsSummary { model_memory_limit?: string source: MlDataframeAnalyticsSource version?: VersionString + _meta?: Metadata } export interface MlDataframeEvaluationClassification { @@ -13970,21 +14107,48 @@ export interface MlDetectorRead { use_null?: boolean } -export interface MlDiscoveryNode { - attributes: Record +export interface MlDetectorUpdate { + detector_index: integer + description?: string + custom_rules?: MlDetectionRule[] +} + +export type MlDiscoveryNode = Partial> + +export interface MlDiscoveryNodeCompact { + name: Name ephemeral_id: Id id: Id - name: Name transport_address: TransportAddress + attributes: Record +} + +export interface MlDiscoveryNodeContent { + name?: Name + ephemeral_id: Id + transport_address: TransportAddress + external_id: string + attributes: Record + roles: string[] + version: VersionString + min_index_version: integer + max_index_version: integer } export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' +export interface MlExponentialAverageCalculationContext { + incremental_metric_value_ms: DurationValue + latest_timestamp?: EpochTime + 
previous_exponential_average_ms?: DurationValue +} + export interface MlFillMaskInferenceOptions { mask_token?: string num_top_classes?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { @@ -14172,7 +14336,7 @@ export interface MlJobStats { forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact open_time?: DateTime state: MlJobState timing_stats: MlJobTimingStats @@ -14192,6 +14356,23 @@ export interface MlJobTimingStats { export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +export interface MlModelPackageConfig { + create_time?: EpochTime + description?: string + inference_config?: Record + metadata?: Metadata + minimum_version?: string + model_repository?: string + model_type?: string + packaged_model_id: Id + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings + size?: ByteSize + sha256?: string + tags?: string[] + vocabulary_file?: string +} + export interface MlModelPlotConfig { annotations_enabled?: boolean enabled?: boolean @@ -14206,6 +14387,7 @@ export interface MlModelSizeStats { model_bytes: ByteSize model_bytes_exceeded?: ByteSize model_bytes_memory_limit?: ByteSize + output_memory_allocator_bytes?: ByteSize peak_model_bytes?: ByteSize assignment_memory_basis?: string result_type: string @@ -14255,20 +14437,11 @@ export interface MlNerInferenceUpdateOptions { results_field?: string } -export interface MlNlpBertTokenizationConfig { - do_lower_case?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer +export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { } -export interface MlNlpRobertaTokenizationConfig { +export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { add_prefix_space?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer } export interface MlNlpTokenizationUpdateOptions { @@ -14292,7 +14465,7 @@ export interface MlOverallBucket { overall_score: double result_type: string timestamp: EpochTime - timestamp_string: DateTime + timestamp_string?: DateTime } export interface MlOverallBucketJob { @@ -14380,6 +14553,7 @@ export interface MlTextEmbeddingInferenceOptions { embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { @@ -14390,6 +14564,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions { export interface MlTextExpansionInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { @@ -14404,6 +14579,7 @@ export interface MlTimingStats { export interface MlTokenizationConfigContainer { bert?: MlNlpBertTokenizationConfig + bert_ja?: MlNlpBertTokenizationConfig mpnet?: MlNlpBertTokenizationConfig roberta?: MlNlpRobertaTokenizationConfig } @@ -14434,27 +14610,31 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAssignment { + adaptive_allocations?: MlAdaptiveAllocationsSettings | null assignment_state: MlDeploymentAssignmentState max_assigned_allocations?: integer + reason?: string routing_table: Record start_time: DateTime task_parameters: 
MlTrainedModelAssignmentTaskParameters } export interface MlTrainedModelAssignmentRoutingTable { - reason: string + reason?: string routing_state: MlRoutingState current_allocations: integer target_allocations: integer } export interface MlTrainedModelAssignmentTaskParameters { - model_bytes: integer + model_bytes: ByteSize model_id: Id deployment_id: Id - cache_size: ByteSize + cache_size?: ByteSize number_of_allocations: integer priority: MlTrainingPriority + per_deployment_memory_bytes: ByteSize + per_allocation_memory_bytes: ByteSize queue_capacity: integer threads_per_allocation: integer } @@ -14477,6 +14657,7 @@ export interface MlTrainedModelConfig { license_level?: string metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize + model_package?: MlModelPackageConfig location?: MlTrainedModelLocation prefix_strings?: MlTrainedModelPrefixStrings } @@ -14499,36 +14680,45 @@ export interface MlTrainedModelDeploymentAllocationStatus { } export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: DurationValue - error_count: integer - inference_count: integer - last_access: long - node: MlDiscoveryNode - number_of_allocations: integer - number_of_pending_requests: integer - rejection_execution_count: integer + average_inference_time_ms?: DurationValue + average_inference_time_ms_last_minute?: DurationValue + average_inference_time_ms_excluding_cache_hits?: DurationValue + error_count?: integer + inference_count?: long + inference_cache_hit_count?: long + inference_cache_hit_count_last_minute?: long + last_access?: EpochTime + node?: MlDiscoveryNode + number_of_allocations?: integer + number_of_pending_requests?: integer + peak_throughput_per_minute: long + rejection_execution_count?: integer routing_state: MlTrainedModelAssignmentRoutingTable - start_time: EpochTime - threads_per_allocation: integer - timeout_count: integer + start_time?: EpochTime + threads_per_allocation?: integer + throughput_last_minute: integer + timeout_count?: integer } export interface MlTrainedModelDeploymentStats { - allocation_status: MlTrainedModelDeploymentAllocationStatus + adaptive_allocations?: MlAdaptiveAllocationsSettings + allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize deployment_id: Id - error_count: integer - inference_count: integer + error_count?: integer + inference_count?: integer model_id: Id nodes: MlTrainedModelDeploymentNodesStats[] - number_of_allocations: integer - queue_capacity: integer - rejected_execution_count: integer - reason: string + number_of_allocations?: integer + peak_throughput_per_minute: long + priority: MlTrainingPriority + queue_capacity?: integer + rejected_execution_count?: integer + reason?: string start_time: EpochTime - state: MlDeploymentAssignmentState - threads_per_allocation: integer - timeout_count: integer + state?: MlDeploymentAssignmentState + threads_per_allocation?: integer + timeout_count?: integer } export interface MlTrainedModelEntities { @@ -15162,6 +15352,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { exclude_generated?: boolean from?: integer include?: MlInclude + include_model_definition?: boolean size?: integer tags?: string | string[] } @@ -15212,9 +15403,11 @@ export interface MlInfoDefaults { } export interface MlInfoLimits { - max_model_memory_limit?: string - effective_max_model_memory_limit: string - total_ml_memory: string + max_single_ml_node_processors?: integer + total_ml_processors?: integer + max_model_memory_limit?: ByteSize + 
effective_max_model_memory_limit?: ByteSize + total_ml_memory: ByteSize } export interface MlInfoNativeCode { @@ -15259,21 +15452,24 @@ export interface MlPostDataRequest extends RequestBase { } export interface MlPostDataResponse { - bucket_count: long - earliest_record_timestamp: long - empty_bucket_count: long + job_id: Id + processed_record_count: long + processed_field_count: long input_bytes: long input_field_count: long - input_record_count: long invalid_date_count: long - job_id: Id - last_data_time: integer - latest_record_timestamp: long missing_field_count: long out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long + empty_bucket_count: long sparse_bucket_count: long + bucket_count: long + earliest_record_timestamp?: EpochTime + latest_record_timestamp?: EpochTime + last_data_time?: EpochTime + latest_empty_bucket_timestamp?: EpochTime + latest_sparse_bucket_timestamp?: EpochTime + input_record_count: long + log_time?: EpochTime } export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { @@ -15334,6 +15530,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { description?: string dest: MlDataframeAnalyticsDestination max_num_threads?: integer + _meta?: Metadata model_memory_limit?: string source: MlDataframeAnalyticsSource headers?: HttpHeaders @@ -15350,6 +15547,7 @@ export interface MlPutDataFrameAnalyticsResponse { dest: MlDataframeAnalyticsDestination id: Id max_num_threads: integer + _meta?: Metadata model_memory_limit: string source: MlDataframeAnalyticsSource version: VersionString @@ -15362,6 +15560,8 @@ export interface MlPutDatafeedRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean aggregations?: Record + /** @alias aggregations */ + aggs?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration @@ -15411,6 +15611,10 @@ export interface MlPutFilterResponse { export interface MlPutJobRequest extends RequestBase { job_id: Id + allow_no_indices?: boolean + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean allow_lazy_open?: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits @@ -15761,7 +15965,7 @@ export interface MlUpdateJobRequest extends RequestBase { renormalization_window_days?: long results_retention_days?: long groups?: string[] - detectors?: MlDetector[] + detectors?: MlDetectorUpdate[] per_partition_categorization?: MlPerPartitionCategorization } @@ -17183,6 +17387,14 @@ export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestB export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase +export interface SearchApplicationRenderQueryRequest extends RequestBase { + name: Name + params?: Record +} + +export interface SearchApplicationRenderQueryResponse { +} + export interface SearchApplicationSearchRequest extends RequestBase { name: Name typed_keys?: boolean @@ -19192,10 +19404,12 @@ export interface TasksListRequest extends RequestBase { export type TasksListResponse = TasksTaskListResponseBase -export interface TextStructureFindStructureFieldStat { +export type TextStructureEcsCompatibilityType = 'disabled' | 'v1' + +export interface TextStructureFieldStat { count: integer cardinality: integer - top_hits: TextStructureFindStructureTopHit[] + top_hits: TextStructureTopHit[] mean_value?: integer median_value?: integer max_value?: 
integer @@ -19204,6 +19418,81 @@ export interface TextStructureFindStructureFieldStat { latest?: string } +export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml' + +export interface TextStructureTopHit { + count: long + value: any +} + +export interface TextStructureFindFieldStructureRequest extends RequestBase { + column_names?: string + delimiter?: string + documents_to_sample?: uint + ecs_compatibility?: TextStructureEcsCompatibilityType + explain?: boolean + field: Field + format?: TextStructureFormatType + grok_pattern?: GrokPattern + index: IndexName + quote?: string + should_trim_fields?: boolean + timeout?: Duration + timestamp_field?: Field + timestamp_format?: string +} + +export interface TextStructureFindFieldStructureResponse { + charset: string + ecs_compatibility?: TextStructureEcsCompatibilityType + field_stats: Record + format: TextStructureFormatType + grok_pattern?: GrokPattern + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + ingest_pipeline: IngestPipelineConfig + mappings: MappingTypeMapping + multiline_start_pattern?: string + need_client_timezone: boolean + num_lines_analyzed: integer + num_messages_analyzed: integer + sample_start: string + timestamp_field?: Field +} + +export interface TextStructureFindMessageStructureRequest extends RequestBase { + column_names?: string + delimiter?: string + ecs_compatibility?: TextStructureEcsCompatibilityType + explain?: boolean + format?: TextStructureFormatType + grok_pattern?: GrokPattern + quote?: string + should_trim_fields?: boolean + timeout?: Duration + timestamp_field?: Field + timestamp_format?: string + messages: string[] +} + +export interface TextStructureFindMessageStructureResponse { + charset: string + ecs_compatibility?: TextStructureEcsCompatibilityType + field_stats: Record + format: TextStructureFormatType + grok_pattern?: GrokPattern + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + ingest_pipeline: IngestPipelineConfig + mappings: MappingTypeMapping + multiline_start_pattern?: string + need_client_timezone: boolean + num_lines_analyzed: integer + num_messages_analyzed: integer + sample_start: string + timestamp_field?: Field +} + export interface TextStructureFindStructureRequest { charset?: string column_names?: string @@ -19228,7 +19517,7 @@ export interface TextStructureFindStructureResponse { has_header_row?: boolean has_byte_order_marker: boolean format: string - field_stats: Record + field_stats: Record sample_start: string num_messages_analyzed: integer mappings: MappingTypeMapping @@ -19248,11 +19537,6 @@ export interface TextStructureFindStructureResponse { ingest_pipeline: IngestPipelineConfig } -export interface TextStructureFindStructureTopHit { - count: long - value: any -} - export interface TextStructureTestGrokPatternMatchedField { match: string offset: integer @@ -20755,8 +21039,6 @@ export interface SpecUtilsCommonCatQueryParameters { format?: string h?: Names help?: boolean - local?: boolean - master_timeout?: Duration s?: Names v?: boolean } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 7a3306017..08df31aa9 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName + list_executed_pipelines?: boolean pipeline?: string refresh?: Refresh routing?: Routing @@ -67,6 +68,7 @@ export interface 
BulkRequest ex timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean + require_data_stream?: boolean /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } @@ -6929,6 +6931,8 @@ export interface CatAliasesAliasesRecord { export interface CatAliasesRequest extends CatCatRequestBase { name?: Names expand_wildcards?: ExpandWildcards + local?: boolean + master_timeout?: Duration } export type CatAliasesResponse = CatAliasesAliasesRecord[] @@ -6972,6 +6976,8 @@ export interface CatAllocationAllocationRecord { export interface CatAllocationRequest extends CatCatRequestBase { node_id?: NodeIds bytes?: Bytes + local?: boolean + master_timeout?: Duration } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -6988,6 +6994,8 @@ export interface CatComponentTemplatesComponentTemplate { export interface CatComponentTemplatesRequest extends CatCatRequestBase { name?: string + local?: boolean + master_timeout?: Duration } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] @@ -7091,15 +7099,12 @@ export interface CatHealthRequest extends CatCatRequestBase { export type CatHealthResponse = CatHealthHealthRecord[] -export interface CatHelpHelpRecord { - endpoint: string +export interface CatHelpRequest { } -export interface CatHelpRequest extends CatCatRequestBase { +export interface CatHelpResponse { } -export type CatHelpResponse = CatHelpHelpRecord[] - export interface CatIndicesIndicesRecord { health?: string h?: string @@ -7399,6 +7404,7 @@ export interface CatIndicesRequest extends CatCatRequestBase { include_unloaded_segments?: boolean pri?: boolean time?: TimeUnit + master_timeout?: Duration } export type CatIndicesResponse = CatIndicesIndicesRecord[] @@ -7413,6 +7419,8 @@ export interface CatMasterMasterRecord { } export interface CatMasterRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatMasterResponse = CatMasterMasterRecord[] @@ -7467,7 +7475,7 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { bytes?: Bytes h?: CatCatDfaColumns s?: CatCatDfaColumns - time?: Duration + time?: TimeUnit } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] @@ -7713,6 +7721,7 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { s?: CatCatTrainedModelsColumns from?: integer size?: integer + time?: TimeUnit } export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] @@ -7780,6 +7789,8 @@ export interface CatNodeattrsNodeAttributesRecord { } export interface CatNodeattrsRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] @@ -8058,6 +8069,8 @@ export interface CatNodesRequest extends CatCatRequestBase { bytes?: Bytes full_id?: boolean | string include_unloaded_segments?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatNodesResponse = CatNodesNodesRecord[] @@ -8074,6 +8087,9 @@ export interface CatPendingTasksPendingTasksRecord { } export interface CatPendingTasksRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] @@ -8093,6 +8109,9 @@ export interface CatPluginsPluginsRecord { } export interface CatPluginsRequest extends 
CatCatRequestBase { + include_bootstrap?: boolean + local?: boolean + master_timeout?: Duration } export type CatPluginsResponse = CatPluginsPluginsRecord[] @@ -8160,6 +8179,7 @@ export interface CatRecoveryRequest extends CatCatRequestBase { active_only?: boolean bytes?: Bytes detailed?: boolean + time?: TimeUnit } export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] @@ -8172,6 +8192,8 @@ export interface CatRepositoriesRepositoriesRecord { } export interface CatRepositoriesRequest extends CatCatRequestBase { + local?: boolean + master_timeout?: Duration } export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] @@ -8179,6 +8201,8 @@ export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + local?: boolean + master_timeout?: Duration } export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] @@ -8228,6 +8252,8 @@ export interface CatSegmentsSegmentsRecord { export interface CatShardsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes + master_timeout?: Duration + time?: TimeUnit } export type CatShardsResponse = CatShardsShardsRecord[] @@ -8450,6 +8476,8 @@ export interface CatShardsShardsRecord { export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names ignore_unavailable?: boolean + master_timeout?: Duration + time?: TimeUnit } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] @@ -8491,8 +8519,11 @@ export interface CatSnapshotsSnapshotsRecord { export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean - node_id?: string[] + nodes?: string[] parent_task_id?: string + time?: TimeUnit + timeout?: Duration + wait_for_completion?: boolean } export type CatTasksResponse = CatTasksTasksRecord[] @@ -8534,6 +8565,8 @@ export interface CatTasksTasksRecord { export interface CatTemplatesRequest extends CatCatRequestBase { name?: Name + local?: boolean + master_timeout?: Duration } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] @@ -8555,6 +8588,8 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names time?: TimeUnit + local?: boolean + master_timeout?: Duration } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] @@ -9964,12 +9999,42 @@ export interface ConnectorSyncJobCancelResponse { result: Result } +export interface ConnectorSyncJobCheckInRequest extends RequestBase { + connector_sync_job_id: Id +} + +export interface ConnectorSyncJobCheckInResponse { +} + +export interface ConnectorSyncJobClaimRequest extends RequestBase { + connector_sync_job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + sync_cursor?: any + worker_hostname: string + } +} + +export interface ConnectorSyncJobClaimResponse { +} + export interface ConnectorSyncJobDeleteRequest extends RequestBase { connector_sync_job_id: Id } export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase +export interface ConnectorSyncJobErrorRequest extends RequestBase { + connector_sync_job_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ + body?: { + error: string + } +} + +export interface ConnectorSyncJobErrorResponse { +} + export interface ConnectorSyncJobGetRequest extends RequestBase { connector_sync_job_id: Id } @@ -10048,6 +10113,18 @@ export interface ConnectorUpdateErrorResponse { result: Result } +export interface ConnectorUpdateFeaturesRequest extends RequestBase { + connector_id: Id + /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ + body?: { + features: ConnectorConnectorFeatures + } +} + +export interface ConnectorUpdateFeaturesResponse { + result: Result +} + export interface ConnectorUpdateFilteringRequest extends RequestBase { connector_id: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ @@ -12881,7 +12958,16 @@ export interface IngestCsvProcessor extends IngestProcessorBase { export interface IngestDatabaseConfiguration { name: Name - maxmind: IngestMaxmind + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo +} + +export interface IngestDatabaseConfigurationFull { + web?: IngestWeb + local?: IngestLocal + name: Name + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { @@ -13033,6 +13119,9 @@ export interface IngestIpLocationProcessor extends IngestProcessorBase { download_database_on_pipeline_creation?: boolean } +export interface IngestIpinfo { +} + export interface IngestJoinProcessor extends IngestProcessorBase { field: Field separator: string @@ -13063,6 +13152,10 @@ export interface IngestKeyValueProcessor extends IngestProcessorBase { value_split: string } +export interface IngestLocal { + type: string +} + export interface IngestLowercaseProcessor extends IngestProcessorBase { field: Field ignore_missing?: boolean @@ -13270,6 +13363,9 @@ export interface IngestUserAgentProcessor extends IngestProcessorBase { export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version' +export interface IngestWeb { +} + export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { id: Ids master_timeout?: Duration @@ -13278,6 +13374,14 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase +export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { + id: Ids + master_timeout?: Duration + timeout?: Duration +} + +export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestDeletePipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13328,6 +13432,23 @@ export interface IngestGetGeoipDatabaseResponse { databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[] } +export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { + id: Id + version: VersionNumber + modified_date_millis?: EpochTime + modified_date?: EpochTime + database: IngestDatabaseConfigurationFull +} + +export interface IngestGetIpLocationDatabaseRequest extends RequestBase { + id?: Ids + master_timeout?: Duration +} + +export interface IngestGetIpLocationDatabaseResponse { + databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[] +} + export interface IngestGetPipelineRequest extends RequestBase { id?: Id master_timeout?: Duration @@ -13356,6 +13477,16 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase { export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase +export 
interface IngestPutIpLocationDatabaseRequest extends RequestBase { + id: Id + master_timeout?: Duration + timeout?: Duration + /** @deprecated The use of the 'body' key has been deprecated, use 'configuration' instead. */ + body?: IngestDatabaseConfiguration +} + +export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase + export interface IngestPutPipelineRequest extends RequestBase { id: Id master_timeout?: Duration @@ -13539,10 +13670,10 @@ export interface LicensePostStartTrialResponse { export interface LogstashPipeline { description: string last_modified: DateTime - pipeline_metadata: LogstashPipelineMetadata - username: string pipeline: string + pipeline_metadata: LogstashPipelineMetadata pipeline_settings: LogstashPipelineSettings + username: string } export interface LogstashPipelineMetadata { @@ -13638,6 +13769,12 @@ export interface MigrationPostFeatureUpgradeResponse { features: MigrationPostFeatureUpgradeMigrationFeature[] } +export interface MlAdaptiveAllocationsSettings { + enabled: boolean + min_number_of_allocations?: integer + max_number_of_allocations?: integer +} + export interface MlAnalysisConfig { bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer @@ -13668,7 +13805,7 @@ export interface MlAnalysisConfigRead { export interface MlAnalysisLimits { categorization_examples_limit?: long - model_memory_limit?: string + model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { @@ -13817,6 +13954,14 @@ export interface MlClassificationInferenceOptions { top_classes_results_field?: string } +export interface MlCommonTokenizationConfig { + do_lower_case?: boolean + max_sequence_length?: integer + span?: integer + truncate?: MlTokenizationTruncate + with_special_tokens?: boolean +} + export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' export type MlCustomSettings = any @@ -13906,15 +14051,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { assignment_explanation?: string datafeed_id: Id - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact state: MlDatafeedState - timing_stats: MlDatafeedTimingStats + timing_stats?: MlDatafeedTimingStats running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { bucket_count: long exponential_average_search_time_per_hour_ms: DurationValue + exponential_average_calculation_context?: MlExponentialAverageCalculationContext job_id: Id search_count: long total_search_time_ms: DurationValue @@ -14106,6 +14252,7 @@ export interface MlDataframeAnalyticsSummary { model_memory_limit?: string source: MlDataframeAnalyticsSource version?: VersionString + _meta?: Metadata } export interface MlDataframeEvaluationClassification { @@ -14211,21 +14358,48 @@ export interface MlDetectorRead { use_null?: boolean } -export interface MlDiscoveryNode { - attributes: Record +export interface MlDetectorUpdate { + detector_index: integer + description?: string + custom_rules?: MlDetectionRule[] +} + +export type MlDiscoveryNode = Partial> + +export interface MlDiscoveryNodeCompact { + name: Name ephemeral_id: Id id: Id - name: Name transport_address: TransportAddress + attributes: Record +} + +export interface MlDiscoveryNodeContent { + name?: Name + ephemeral_id: Id + transport_address: TransportAddress + external_id: string + attributes: Record + roles: string[] + version: VersionString + min_index_version: integer + max_index_version: integer } export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' 
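Since the update-job request now takes the narrower `MlDetectorUpdate` shape defined above (only `description` and `custom_rules` are updatable, keyed by a required `detector_index`), a hedged sketch of updating a detector's description; the job ID is invented:

[source, js]
----
const response = await client.ml.updateJob({
  job_id: "my-anomaly-job",
  detectors: [
    {
      detector_index: 0,
      description: "Updated detector description",
    },
  ],
});
console.log(response);
----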
+export interface MlExponentialAverageCalculationContext { + incremental_metric_value_ms: DurationValue + latest_timestamp?: EpochTime + previous_exponential_average_ms?: DurationValue +} + export interface MlFillMaskInferenceOptions { mask_token?: string num_top_classes?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { @@ -14413,7 +14587,7 @@ export interface MlJobStats { forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact open_time?: DateTime state: MlJobState timing_stats: MlJobTimingStats @@ -14433,6 +14607,23 @@ export interface MlJobTimingStats { export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +export interface MlModelPackageConfig { + create_time?: EpochTime + description?: string + inference_config?: Record + metadata?: Metadata + minimum_version?: string + model_repository?: string + model_type?: string + packaged_model_id: Id + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings + size?: ByteSize + sha256?: string + tags?: string[] + vocabulary_file?: string +} + export interface MlModelPlotConfig { annotations_enabled?: boolean enabled?: boolean @@ -14447,6 +14638,7 @@ export interface MlModelSizeStats { model_bytes: ByteSize model_bytes_exceeded?: ByteSize model_bytes_memory_limit?: ByteSize + output_memory_allocator_bytes?: ByteSize peak_model_bytes?: ByteSize assignment_memory_basis?: string result_type: string @@ -14496,20 +14688,11 @@ export interface MlNerInferenceUpdateOptions { results_field?: string } -export interface MlNlpBertTokenizationConfig { - do_lower_case?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer +export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { } -export interface MlNlpRobertaTokenizationConfig { +export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { add_prefix_space?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer } export interface MlNlpTokenizationUpdateOptions { @@ -14533,7 +14716,7 @@ export interface MlOverallBucket { overall_score: double result_type: string timestamp: EpochTime - timestamp_string: DateTime + timestamp_string?: DateTime } export interface MlOverallBucketJob { @@ -14621,6 +14804,7 @@ export interface MlTextEmbeddingInferenceOptions { embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { @@ -14631,6 +14815,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions { export interface MlTextExpansionInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { @@ -14645,6 +14830,7 @@ export interface MlTimingStats { export interface MlTokenizationConfigContainer { bert?: MlNlpBertTokenizationConfig + bert_ja?: MlNlpBertTokenizationConfig mpnet?: MlNlpBertTokenizationConfig roberta?: MlNlpRobertaTokenizationConfig } @@ -14675,27 +14861,31 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAssignment { + adaptive_allocations?: MlAdaptiveAllocationsSettings | null assignment_state: 
MlDeploymentAssignmentState max_assigned_allocations?: integer + reason?: string routing_table: Record start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters } export interface MlTrainedModelAssignmentRoutingTable { - reason: string + reason?: string routing_state: MlRoutingState current_allocations: integer target_allocations: integer } export interface MlTrainedModelAssignmentTaskParameters { - model_bytes: integer + model_bytes: ByteSize model_id: Id deployment_id: Id - cache_size: ByteSize + cache_size?: ByteSize number_of_allocations: integer priority: MlTrainingPriority + per_deployment_memory_bytes: ByteSize + per_allocation_memory_bytes: ByteSize queue_capacity: integer threads_per_allocation: integer } @@ -14718,6 +14908,7 @@ export interface MlTrainedModelConfig { license_level?: string metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize + model_package?: MlModelPackageConfig location?: MlTrainedModelLocation prefix_strings?: MlTrainedModelPrefixStrings } @@ -14740,36 +14931,45 @@ export interface MlTrainedModelDeploymentAllocationStatus { } export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: DurationValue - error_count: integer - inference_count: integer - last_access: long - node: MlDiscoveryNode - number_of_allocations: integer - number_of_pending_requests: integer - rejection_execution_count: integer + average_inference_time_ms?: DurationValue + average_inference_time_ms_last_minute?: DurationValue + average_inference_time_ms_excluding_cache_hits?: DurationValue + error_count?: integer + inference_count?: long + inference_cache_hit_count?: long + inference_cache_hit_count_last_minute?: long + last_access?: EpochTime + node?: MlDiscoveryNode + number_of_allocations?: integer + number_of_pending_requests?: integer + peak_throughput_per_minute: long + rejection_execution_count?: integer routing_state: MlTrainedModelAssignmentRoutingTable - start_time: EpochTime - threads_per_allocation: integer - timeout_count: integer + start_time?: EpochTime + threads_per_allocation?: integer + throughput_last_minute: integer + timeout_count?: integer } export interface MlTrainedModelDeploymentStats { - allocation_status: MlTrainedModelDeploymentAllocationStatus + adaptive_allocations?: MlAdaptiveAllocationsSettings + allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize deployment_id: Id - error_count: integer - inference_count: integer + error_count?: integer + inference_count?: integer model_id: Id nodes: MlTrainedModelDeploymentNodesStats[] - number_of_allocations: integer - queue_capacity: integer - rejected_execution_count: integer - reason: string + number_of_allocations?: integer + peak_throughput_per_minute: long + priority: MlTrainingPriority + queue_capacity?: integer + rejected_execution_count?: integer + reason?: string start_time: EpochTime - state: MlDeploymentAssignmentState - threads_per_allocation: integer - timeout_count: integer + state?: MlDeploymentAssignmentState + threads_per_allocation?: integer + timeout_count?: integer } export interface MlTrainedModelEntities { @@ -15445,6 +15645,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { exclude_generated?: boolean from?: integer include?: MlInclude + include_model_definition?: boolean size?: integer tags?: string | string[] } @@ -15498,9 +15699,11 @@ export interface MlInfoDefaults { } export interface MlInfoLimits { - max_model_memory_limit?: string - effective_max_model_memory_limit: string - 
@@ -15552,21 +15755,24 @@ export interface MlPostDataRequest extends RequestBase {
 }
 
 export interface MlPostDataResponse {
-  bucket_count: long
-  earliest_record_timestamp: long
-  empty_bucket_count: long
+  job_id: Id
+  processed_record_count: long
+  processed_field_count: long
   input_bytes: long
   input_field_count: long
-  input_record_count: long
   invalid_date_count: long
-  job_id: Id
-  last_data_time: integer
-  latest_record_timestamp: long
   missing_field_count: long
   out_of_order_timestamp_count: long
-  processed_field_count: long
-  processed_record_count: long
+  empty_bucket_count: long
   sparse_bucket_count: long
+  bucket_count: long
+  earliest_record_timestamp?: EpochTime<UnitMillis>
+  latest_record_timestamp?: EpochTime<UnitMillis>
+  last_data_time?: EpochTime<UnitMillis>
+  latest_empty_bucket_timestamp?: EpochTime<UnitMillis>
+  latest_sparse_bucket_timestamp?: EpochTime<UnitMillis>
+  input_record_count: long
+  log_time?: EpochTime<UnitMillis>
 }
 
 export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
@@ -15638,6 +15844,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
     description?: string
     dest: MlDataframeAnalyticsDestination
     max_num_threads?: integer
+    _meta?: Metadata
     model_memory_limit?: string
     source: MlDataframeAnalyticsSource
     headers?: HttpHeaders
@@ -15655,6 +15862,7 @@ export interface MlPutDataFrameAnalyticsResponse {
   dest: MlDataframeAnalyticsDestination
   id: Id
   max_num_threads: integer
+  _meta?: Metadata
   model_memory_limit: string
   source: MlDataframeAnalyticsSource
   version: VersionString
@@ -15669,6 +15877,8 @@ export interface MlPutDatafeedRequest extends RequestBase {
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     aggregations?: Record<string, AggregationsAggregationContainer>
+    /** @alias aggregations */
+    aggs?: Record<string, AggregationsAggregationContainer>
     chunking_config?: MlChunkingConfig
     delayed_data_check_config?: MlDelayedDataCheckConfig
     frequency?: Duration
@@ -15722,6 +15932,10 @@ export interface MlPutFilterResponse {
 
 export interface MlPutJobRequest extends RequestBase {
   job_id: Id
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_throttled?: boolean
+  ignore_unavailable?: boolean
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
     allow_lazy_open?: boolean
@@ -15733,6 +15947,7 @@ export interface MlPutJobRequest extends RequestBase {
     data_description: MlDataDescription
     datafeed_config?: MlDatafeedConfig
     description?: string
+    job_id?: Id
     groups?: string[]
     model_plot_config?: MlModelPlotConfig
    model_snapshot_retention_days?: long
@@ -16104,7 +16319,7 @@ export interface MlUpdateJobRequest extends RequestBase {
     renormalization_window_days?: long
     results_retention_days?: long
     groups?: string[]
-    detectors?: MlDetector[]
+    detectors?: MlDetectorUpdate[]
     per_partition_categorization?: MlPerPartitionCategorization
   }
 }
@@ -17557,6 +17772,17 @@ export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase {
 
 export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase
 
+export interface SearchApplicationRenderQueryRequest extends RequestBase {
+  name: Name
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    params?: Record<string, any>
+  }
+}
+
+export interface SearchApplicationRenderQueryResponse {
+}
+
 export interface SearchApplicationSearchRequest extends RequestBase {
   name: Name
   typed_keys?: boolean
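
`SearchApplicationRenderQueryRequest` and its intentionally empty response type back the render-query endpoint for search applications. Assuming the matching `client.searchApplication.renderQuery` helper ships alongside these types, a call could look like this (the application name and template parameter are made up):

[source, js]
----
const response = await client.searchApplication.renderQuery({
  name: "my-search-app", // hypothetical search application name
  params: {
    query_string: "kayak", // parameters consumed by the app's search template
  },
});
console.log(response);
----
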
@@ -19686,10 +19912,12 @@ export interface TasksListRequest extends RequestBase {
 
 export type TasksListResponse = TasksTaskListResponseBase
 
-export interface TextStructureFindStructureFieldStat {
+export type TextStructureEcsCompatibilityType = 'disabled' | 'v1'
+
+export interface TextStructureFieldStat {
   count: integer
   cardinality: integer
-  top_hits: TextStructureFindStructureTopHit[]
+  top_hits: TextStructureTopHit[]
   mean_value?: integer
   median_value?: integer
   max_value?: integer
@@ -19698,6 +19926,84 @@ export interface TextStructureFindStructureFieldStat {
   latest?: string
 }
 
+export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml'
+
+export interface TextStructureTopHit {
+  count: long
+  value: any
+}
+
+export interface TextStructureFindFieldStructureRequest extends RequestBase {
+  column_names?: string
+  delimiter?: string
+  documents_to_sample?: uint
+  ecs_compatibility?: TextStructureEcsCompatibilityType
+  explain?: boolean
+  field: Field
+  format?: TextStructureFormatType
+  grok_pattern?: GrokPattern
+  index: IndexName
+  quote?: string
+  should_trim_fields?: boolean
+  timeout?: Duration
+  timestamp_field?: Field
+  timestamp_format?: string
+}
+
+export interface TextStructureFindFieldStructureResponse {
+  charset: string
+  ecs_compatibility?: TextStructureEcsCompatibilityType
+  field_stats: Record<Field, TextStructureFieldStat>
+  format: TextStructureFormatType
+  grok_pattern?: GrokPattern
+  java_timestamp_formats?: string[]
+  joda_timestamp_formats?: string[]
+  ingest_pipeline: IngestPipelineConfig
+  mappings: MappingTypeMapping
+  multiline_start_pattern?: string
+  need_client_timezone: boolean
+  num_lines_analyzed: integer
+  num_messages_analyzed: integer
+  sample_start: string
+  timestamp_field?: Field
+}
+
+export interface TextStructureFindMessageStructureRequest extends RequestBase {
+  column_names?: string
+  delimiter?: string
+  ecs_compatibility?: TextStructureEcsCompatibilityType
+  explain?: boolean
+  format?: TextStructureFormatType
+  grok_pattern?: GrokPattern
+  quote?: string
+  should_trim_fields?: boolean
+  timeout?: Duration
+  timestamp_field?: Field
+  timestamp_format?: string
+  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
+  body?: {
+    messages: string[]
+  }
+}
+
+export interface TextStructureFindMessageStructureResponse {
+  charset: string
+  ecs_compatibility?: TextStructureEcsCompatibilityType
+  field_stats: Record<Field, TextStructureFieldStat>
+  format: TextStructureFormatType
+  grok_pattern?: GrokPattern
+  java_timestamp_formats?: string[]
+  joda_timestamp_formats?: string[]
+  ingest_pipeline: IngestPipelineConfig
+  mappings: MappingTypeMapping
+  multiline_start_pattern?: string
+  need_client_timezone: boolean
+  num_lines_analyzed: integer
+  num_messages_analyzed: integer
+  sample_start: string
+  timestamp_field?: Field
+}
+
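
The two request/response pairs above type the find-field-structure and find-message-structure APIs. Assuming the corresponding `client.textStructure` helpers land together with these types, usage could look roughly like this (index name, field, and log lines are illustrative):

[source, js]
----
// Infer structure from the values of one field in an existing index
const fieldStructure = await client.textStructure.findFieldStructure({
  index: "my-logs", // hypothetical index
  field: "message",
  documents_to_sample: 1000,
  ecs_compatibility: "v1",
});
console.log(fieldStructure.grok_pattern, fieldStructure.field_stats);

// Infer structure from a list of raw messages instead
const messageStructure = await client.textStructure.findMessageStructure({
  messages: [
    "[2024-03-05T10:52:36,256][INFO ][o.e.n.Node] [node-1] starting ...",
    "[2024-03-05T10:52:41,038][INFO ][o.e.n.Node] [node-1] started",
  ],
  format: "semi_structured_text",
});
console.log(messageStructure.mappings);
----
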
 export interface TextStructureFindStructureRequest {
   charset?: string
   column_names?: string
@@ -19723,7 +20029,7 @@ export interface TextStructureFindStructureResponse {
   has_header_row?: boolean
   has_byte_order_marker: boolean
   format: string
-  field_stats: Record<Field, TextStructureFindStructureFieldStat>
+  field_stats: Record<Field, TextStructureFieldStat>
   sample_start: string
   num_messages_analyzed: integer
   mappings: MappingTypeMapping
@@ -19743,11 +20049,6 @@ export interface TextStructureFindStructureResponse {
   ingest_pipeline: IngestPipelineConfig
 }
 
-export interface TextStructureFindStructureTopHit {
-  count: long
-  value: any
-}
-
 export interface TextStructureTestGrokPatternMatchedField {
   match: string
   offset: integer
@@ -21271,8 +21572,6 @@ export interface SpecUtilsCommonCatQueryParameters {
   format?: string
   h?: Names
   help?: boolean
-  local?: boolean
-  master_timeout?: Duration
   s?: Names
   v?: boolean
 }
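
Finally, `local` and `master_timeout` are dropped from the shared cat query parameters, so callers should rely only on the remaining common options. A small sketch against one cat helper (any cat API works the same way):

[source, js]
----
const response = await client.cat.indices({
  format: "json",
  h: ["index", "docs.count"],
  s: "index",
  v: true,
  // `local` and `master_timeout` are no longer common cat parameters
});
console.log(response);
----
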